rethook.c

// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "rethook: " fmt

#include <linux/bug.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/rethook.h>
#include <linux/slab.h>

/* Return hook list (shadow stack by list) */

/*
 * This function is called from delayed_put_task_struct() when a task is
 * dead and cleaned up to recycle any kretprobe instances associated with
 * this task. These left over instances represent probed functions that
 * have been called but will never return.
 */
void rethook_flush_task(struct task_struct *tk)
{
	struct rethook_node *rhn;
	struct llist_node *node;

	node = __llist_del_all(&tk->rethooks);
	while (node) {
		rhn = container_of(node, struct rethook_node, llist);
		node = node->next;
		preempt_disable();
		rethook_recycle(rhn);
		preempt_enable();
	}
}

static void rethook_free_rcu(struct rcu_head *head)
{
	struct rethook *rh = container_of(head, struct rethook, rcu);

	objpool_fini(&rh->pool);
}
/**
 * rethook_stop() - Stop using a rethook.
 * @rh: the struct rethook to stop.
 *
 * Stop using a rethook to prepare for freeing it. If you want to wait for
 * all running rethook handlers before calling rethook_free(), you need to
 * call this first, wait for an RCU grace period, and then call
 * rethook_free().
 */
void rethook_stop(struct rethook *rh)
{
	rcu_assign_pointer(rh->handler, NULL);
}

/**
 * rethook_free() - Free struct rethook.
 * @rh: the struct rethook to be freed.
 *
 * Free the rethook. Before calling this function, the user must ensure that
 * @rh::data is cleaned up if needed (otherwise, the handler can still access
 * it after calling this function). This function schedules @rh to be freed
 * after all rethook_node instances are freed (i.e. not immediately). The
 * caller must not touch @rh after calling this.
 */
void rethook_free(struct rethook *rh)
{
	rethook_stop(rh);

	call_rcu(&rh->rcu, rethook_free_rcu);
}
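/*
 * Example (a minimal sketch, not part of this file): to make sure no handler
 * is still running before the owner's private data goes away, a user can
 * separate the stop and free steps with an RCU grace period:
 *
 *	rethook_stop(my_rh);
 *	synchronize_rcu();	// wait for in-flight handlers to finish
 *	rethook_free(my_rh);	// remaining nodes are reclaimed via RCU
 *
 * 'my_rh' is a hypothetical rethook previously returned by rethook_alloc().
 */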
static int rethook_init_node(void *nod, void *context)
{
	struct rethook_node *node = nod;

	node->rethook = context;
	return 0;
}

static int rethook_fini_pool(struct objpool_head *head, void *context)
{
	kfree(context);
	return 0;
}

static inline rethook_handler_t rethook_get_handler(struct rethook *rh)
{
	return (rethook_handler_t)rcu_dereference_check(rh->handler,
							rcu_read_lock_any_held());
}
/**
 * rethook_alloc() - Allocate struct rethook.
 * @data: a data pointer to pass to @handler when hooking the return.
 * @handler: the return hook callback function, must NOT be NULL
 * @size: node size: rethook node plus additional data
 * @num: number of rethook nodes to be preallocated
 *
 * Allocate and initialize a new rethook with @data and @handler.
 * Returns a pointer to the new rethook, or an error pointer on failure.
 *
 * Note that @handler == NULL means this rethook is going to be freed.
 */
struct rethook *rethook_alloc(void *data, rethook_handler_t handler,
			      int size, int num)
{
	struct rethook *rh;

	if (!handler || num <= 0 || size < sizeof(struct rethook_node))
		return ERR_PTR(-EINVAL);

	rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
	if (!rh)
		return ERR_PTR(-ENOMEM);

	rh->data = data;
	rcu_assign_pointer(rh->handler, handler);

	/* initialize the objpool for rethook nodes */
	if (objpool_init(&rh->pool, num, size, GFP_KERNEL, rh,
			 rethook_init_node, rethook_fini_pool)) {
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}
	return rh;
}
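/*
 * Example (an illustrative sketch, all names are hypothetical): a caller
 * usually embeds struct rethook_node at the start of its own node type and
 * passes the total size, so each preallocated node carries per-call data:
 *
 *	struct my_ret_node {
 *		struct rethook_node node;	// must come first
 *		unsigned long entry_ts;		// example per-call data
 *	};
 *
 *	static void my_ret_handler(struct rethook_node *n, void *data,
 *				   unsigned long ret_addr, struct pt_regs *regs)
 *	{
 *		struct my_ret_node *mn = container_of(n, struct my_ret_node, node);
 *		// @data is the pointer passed to rethook_alloc(); use mn->entry_ts here
 *	}
 *
 *	rh = rethook_alloc(my_data, my_ret_handler,
 *			   sizeof(struct my_ret_node), 16);
 *
 * 'my_ret_node', 'my_ret_handler' and 'my_data' are illustrative only.
 */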
static void free_rethook_node_rcu(struct rcu_head *head)
{
	struct rethook_node *node = container_of(head, struct rethook_node, rcu);
	struct rethook *rh = node->rethook;

	objpool_drop(node, &rh->pool);
}
/**
 * rethook_recycle() - return the node to rethook.
 * @node: The struct rethook_node to be returned.
 *
 * Return the @node back to @node::rethook. If @node::rethook is already
 * marked to be freed, this will free the @node instead.
 */
void rethook_recycle(struct rethook_node *node)
{
	rethook_handler_t handler;

	handler = rethook_get_handler(node->rethook);
	if (likely(handler))
		objpool_push(node, &node->rethook->pool);
	else
		call_rcu(&node->rcu, free_rethook_node_rcu);
}
NOKPROBE_SYMBOL(rethook_recycle);
/**
 * rethook_try_get() - get an unused rethook node.
 * @rh: The struct rethook which pools the nodes.
 *
 * Get an unused rethook node from @rh. If the node pool is empty, this
 * will return NULL. The caller must disable preemption.
 */
struct rethook_node *rethook_try_get(struct rethook *rh)
{
	rethook_handler_t handler = rethook_get_handler(rh);

	/* Check whether @rh is going to be freed. */
	if (unlikely(!handler))
		return NULL;

#if defined(CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING) || defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
	/*
	 * This expects the caller to set up a rethook at the function entry.
	 * When the function returns, the rethook will eventually be reclaimed
	 * or released in rethook_recycle() with call_rcu().
	 * This means the caller must run in an RCU-available context.
	 */
	if (unlikely(!rcu_is_watching()))
		return NULL;
#endif

	return (struct rethook_node *)objpool_pop(&rh->pool);
}
NOKPROBE_SYMBOL(rethook_try_get);
/**
 * rethook_hook() - Hook the current function return.
 * @node: The struct rethook node to hook the function return.
 * @regs: The struct pt_regs for the function entry.
 * @mcount: True if this is called from mcount(ftrace) context.
 *
 * Hook the current running function return. This must be called at the
 * function entry (or at least @regs must be the registers of the function
 * entry). @mcount is used for identifying the context: if this is called
 * from the ftrace (mcount) callback, @mcount must be set true; if this is
 * called from the real function entry (e.g. kprobes), @mcount must be set
 * false. This is because the way to hook the function return depends on
 * the context.
 */
void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount)
{
	arch_rethook_prepare(node, regs, mcount);
	__llist_add(&node->llist, &current->rethooks);
}
NOKPROBE_SYMBOL(rethook_hook);
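/*
 * Example (a minimal sketch of an entry-side caller, not a definitive
 * pattern): grab a free node and hook the return while preemption is
 * disabled. 'my_rh' is a hypothetical rethook created by rethook_alloc();
 * a kprobe pre-handler already runs with preemption disabled.
 *
 *	struct rethook_node *rhn;
 *
 *	rhn = rethook_try_get(my_rh);
 *	if (!rhn)
 *		return 0;	// pool exhausted or rethook being freed
 *	rethook_hook(rhn, regs, false);	// false: not called from mcount/ftrace
 */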
/* This assumes the 'tsk' is the current task or is not running. */
static unsigned long __rethook_find_ret_addr(struct task_struct *tsk,
					     struct llist_node **cur)
{
	struct rethook_node *rh = NULL;
	struct llist_node *node = *cur;

	if (!node)
		node = tsk->rethooks.first;
	else
		node = node->next;

	while (node) {
		rh = container_of(node, struct rethook_node, llist);
		if (rh->ret_addr != (unsigned long)arch_rethook_trampoline) {
			*cur = node;
			return rh->ret_addr;
		}
		node = node->next;
	}
	return 0;
}
NOKPROBE_SYMBOL(__rethook_find_ret_addr);
/**
 * rethook_find_ret_addr -- Find correct return address modified by rethook
 * @tsk: Target task
 * @frame: A frame pointer
 * @cur: a storage of the loop cursor llist_node pointer for next call
 *
 * Find the correct return address modified by a rethook on @tsk in unsigned
 * long type.
 * The @tsk must be 'current' or a task which is not running. @frame is a
 * hint to get the correct return address - which is compared with the
 * rethook::frame field. The @cur is a loop cursor for searching the
 * kretprobe return addresses on the @tsk. '*@cur' should be NULL on the
 * first call, but '@cur' itself must NOT be NULL.
 *
 * Returns the found address value, or zero if not found.
 */
unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
				    struct llist_node **cur)
{
	struct rethook_node *rhn = NULL;
	unsigned long ret;

	if (WARN_ON_ONCE(!cur))
		return 0;

	if (tsk != current && task_is_running(tsk))
		return 0;

	do {
		ret = __rethook_find_ret_addr(tsk, cur);
		if (!ret)
			break;
		rhn = container_of(*cur, struct rethook_node, llist);
	} while (rhn->frame != frame);

	return ret;
}
NOKPROBE_SYMBOL(rethook_find_ret_addr);
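/*
 * Example (a sketch of a stack-unwinder consumer, assuming the unwinder has
 * a frame pointer 'fp' for the frame being decoded): keep the cursor across
 * calls so repeated trampoline addresses on the same task are resolved in
 * order.
 *
 *	struct llist_node *cur = NULL;
 *	unsigned long real_addr;
 *
 *	// whenever a frame's return address is arch_rethook_trampoline:
 *	real_addr = rethook_find_ret_addr(task, fp, &cur);
 *	if (real_addr)
 *		addr = real_addr;	// report this instead of the trampoline
 *
 * 'task', 'fp' and 'addr' are illustrative unwinder-side variables.
 */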
void __weak arch_rethook_fixup_return(struct pt_regs *regs,
				      unsigned long correct_ret_addr)
{
	/*
	 * Do nothing by default. If the architecture uses a frame pointer to
	 * record the real return address on the stack, it should implement
	 * this function to fix up the return address so that stacktrace
	 * works from the rethook handler.
	 */
}
/* This function will be called from each arch-defined trampoline. */
unsigned long rethook_trampoline_handler(struct pt_regs *regs,
					 unsigned long frame)
{
	struct llist_node *first, *node = NULL;
	unsigned long correct_ret_addr;
	rethook_handler_t handler;
	struct rethook_node *rhn;

	correct_ret_addr = __rethook_find_ret_addr(current, &node);
	if (!correct_ret_addr) {
		pr_err("rethook: Return address not found! Maybe there is a bug in the kernel\n");
		BUG_ON(1);
	}

	instruction_pointer_set(regs, correct_ret_addr);

	/*
	 * These loops must be protected from rethook_free_rcu() because they
	 * access 'rhn->rethook'.
	 */
	preempt_disable_notrace();

	/*
	 * Run the handler on the shadow stack. Do not unlink the list here
	 * because a stackdump inside the handlers needs to decode it.
	 */
	first = current->rethooks.first;
	while (first) {
		rhn = container_of(first, struct rethook_node, llist);
		if (WARN_ON_ONCE(rhn->frame != frame))
			break;
		handler = rethook_get_handler(rhn->rethook);
		if (handler)
			handler(rhn, rhn->rethook->data,
				correct_ret_addr, regs);
		if (first == node)
			break;
		first = first->next;
	}

	/* Fixup registers for returning to the correct address. */
	arch_rethook_fixup_return(regs, correct_ret_addr);

	/* Unlink the used part of the shadow stack. */
	first = current->rethooks.first;
	current->rethooks.first = node->next;
	node->next = NULL;

	while (first) {
		rhn = container_of(first, struct rethook_node, llist);
		first = first->next;
		rethook_recycle(rhn);
	}
	preempt_enable_notrace();

	return correct_ret_addr;
}
NOKPROBE_SYMBOL(rethook_trampoline_handler);
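/*
 * Example (a hedged sketch of the arch-side glue; the details are entirely
 * arch-specific): an architecture's return trampoline typically saves the
 * registers into a struct pt_regs, derives a frame hint (e.g. the stack
 * address where the return address was stored), calls this handler, and then
 * returns to the address it reports:
 *
 *	correct = rethook_trampoline_handler(regs, frame);
 *	// restore registers and return to 'correct'
 *
 * 'correct', 'regs' and 'frame' here stand for arch-trampoline locals.
 */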