test_kprobes.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * test_kprobes.c - simple sanity test for k*probes
  4. *
  5. * Copyright IBM Corp. 2008
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/kprobes.h>
  9. #include <linux/random.h>
  10. #include <kunit/test.h>
/* Divisor used to derive deterministic expected values from rand1. */
#define div_factor 3

/* Random test input and the values recorded by the probe handlers. */
static u32 rand1, preh_val, posth_val;

/*
 * Indirect call pointers to the probed functions, filled in by
 * kprobes_test_init(), so the calls cannot be inlined or folded away.
 */
static u32 (*target)(u32 value);
static u32 (*recursed_target)(u32 value);
static u32 (*target2)(u32 value);

/*
 * KUnit test currently running; handlers have fixed kprobe signatures and
 * cannot receive the struct kunit pointer as an argument.
 */
static struct kunit *current_test;

static unsigned long (*internal_target)(void);
static unsigned long (*stacktrace_target)(void);
static unsigned long (*stacktrace_driver)(void);

/* Return addresses recorded inside the stacktrace test target functions. */
static unsigned long target_return_address[2];
  21. static noinline u32 kprobe_target(u32 value)
  22. {
  23. return (value / div_factor);
  24. }
  25. static noinline u32 kprobe_recursed_target(u32 value)
  26. {
  27. return (value / div_factor);
  28. }
  29. static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
  30. {
  31. KUNIT_EXPECT_FALSE(current_test, preemptible());
  32. preh_val = recursed_target(rand1);
  33. return 0;
  34. }
  35. static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
  36. unsigned long flags)
  37. {
  38. u32 expval = recursed_target(rand1);
  39. KUNIT_EXPECT_FALSE(current_test, preemptible());
  40. KUNIT_EXPECT_EQ(current_test, preh_val, expval);
  41. posth_val = preh_val + div_factor;
  42. }
  43. static struct kprobe kp = {
  44. .symbol_name = "kprobe_target",
  45. .pre_handler = kp_pre_handler,
  46. .post_handler = kp_post_handler
  47. };
  48. static void test_kprobe(struct kunit *test)
  49. {
  50. current_test = test;
  51. KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
  52. target(rand1);
  53. unregister_kprobe(&kp);
  54. KUNIT_EXPECT_NE(test, 0, preh_val);
  55. KUNIT_EXPECT_NE(test, 0, posth_val);
  56. }
  57. static noinline u32 kprobe_target2(u32 value)
  58. {
  59. return (value / div_factor) + 1;
  60. }
  61. static noinline unsigned long kprobe_stacktrace_internal_target(void)
  62. {
  63. if (!target_return_address[0])
  64. target_return_address[0] = (unsigned long)__builtin_return_address(0);
  65. return target_return_address[0];
  66. }
  67. static noinline unsigned long kprobe_stacktrace_target(void)
  68. {
  69. if (!target_return_address[1])
  70. target_return_address[1] = (unsigned long)__builtin_return_address(0);
  71. if (internal_target)
  72. internal_target();
  73. return target_return_address[1];
  74. }
/*
 * Drive a call into stacktrace_target() and return this function's own
 * return address so callers can verify the call was not inlined.
 */
static noinline unsigned long kprobe_stacktrace_driver(void)
{
	if (stacktrace_target)
		stacktrace_target();
	/* This is for preventing inlining the function */
	return (unsigned long)__builtin_return_address(0);
}
  82. static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
  83. {
  84. preh_val = (rand1 / div_factor) + 1;
  85. return 0;
  86. }
  87. static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
  88. unsigned long flags)
  89. {
  90. KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
  91. posth_val = preh_val + div_factor;
  92. }
  93. static struct kprobe kp2 = {
  94. .symbol_name = "kprobe_target2",
  95. .pre_handler = kp_pre_handler2,
  96. .post_handler = kp_post_handler2
  97. };
  98. static void test_kprobes(struct kunit *test)
  99. {
  100. struct kprobe *kps[2] = {&kp, &kp2};
  101. current_test = test;
  102. /* addr and flags should be cleard for reusing kprobe. */
  103. kp.addr = NULL;
  104. kp.flags = 0;
  105. KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
  106. preh_val = 0;
  107. posth_val = 0;
  108. target(rand1);
  109. KUNIT_EXPECT_NE(test, 0, preh_val);
  110. KUNIT_EXPECT_NE(test, 0, posth_val);
  111. preh_val = 0;
  112. posth_val = 0;
  113. target2(rand1);
  114. KUNIT_EXPECT_NE(test, 0, preh_val);
  115. KUNIT_EXPECT_NE(test, 0, posth_val);
  116. unregister_kprobes(kps, 2);
  117. }
  118. static struct kprobe kp_missed = {
  119. .symbol_name = "kprobe_recursed_target",
  120. .pre_handler = kp_pre_handler,
  121. .post_handler = kp_post_handler,
  122. };
  123. static void test_kprobe_missed(struct kunit *test)
  124. {
  125. current_test = test;
  126. preh_val = 0;
  127. posth_val = 0;
  128. KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
  129. recursed_target(rand1);
  130. KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
  131. KUNIT_EXPECT_NE(test, 0, preh_val);
  132. KUNIT_EXPECT_NE(test, 0, posth_val);
  133. unregister_kprobe(&kp_missed);
  134. }
  135. #ifdef CONFIG_KRETPROBES
/* Value handed from the kretprobe entry handler to the return handler. */
static u32 krph_val;
  137. static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
  138. {
  139. KUNIT_EXPECT_FALSE(current_test, preemptible());
  140. krph_val = (rand1 / div_factor);
  141. return 0;
  142. }
  143. static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
  144. {
  145. unsigned long ret = regs_return_value(regs);
  146. KUNIT_EXPECT_FALSE(current_test, preemptible());
  147. KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
  148. KUNIT_EXPECT_NE(current_test, krph_val, 0);
  149. krph_val = rand1;
  150. return 0;
  151. }
  152. static struct kretprobe rp = {
  153. .handler = return_handler,
  154. .entry_handler = entry_handler,
  155. .kp.symbol_name = "kprobe_target"
  156. };
  157. static void test_kretprobe(struct kunit *test)
  158. {
  159. current_test = test;
  160. KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
  161. target(rand1);
  162. unregister_kretprobe(&rp);
  163. KUNIT_EXPECT_EQ(test, krph_val, rand1);
  164. }
  165. static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
  166. {
  167. unsigned long ret = regs_return_value(regs);
  168. KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
  169. KUNIT_EXPECT_NE(current_test, krph_val, 0);
  170. krph_val = rand1;
  171. return 0;
  172. }
  173. static struct kretprobe rp2 = {
  174. .handler = return_handler2,
  175. .entry_handler = entry_handler,
  176. .kp.symbol_name = "kprobe_target2"
  177. };
  178. static void test_kretprobes(struct kunit *test)
  179. {
  180. struct kretprobe *rps[2] = {&rp, &rp2};
  181. current_test = test;
  182. /* addr and flags should be cleard for reusing kprobe. */
  183. rp.kp.addr = NULL;
  184. rp.kp.flags = 0;
  185. KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
  186. krph_val = 0;
  187. target(rand1);
  188. KUNIT_EXPECT_EQ(test, krph_val, rand1);
  189. krph_val = 0;
  190. target2(rand1);
  191. KUNIT_EXPECT_EQ(test, krph_val, rand1);
  192. unregister_kretprobes(rps, 2);
  193. }
  194. #ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
#define STACK_BUF_SIZE 16
/* Scratch buffer for stack traces captured inside the return handlers. */
static unsigned long stack_buf[STACK_BUF_SIZE];
/*
 * Return handler for rp3 on kprobe_stacktrace_target(): verify that a stack
 * trace taken inside a kretprobe handler still contains the target's real
 * return address despite the kretprobe trampoline being on the stack.
 */
static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);

	/*
	 * Test stacktrace inside the kretprobe handler; this will involve
	 * the kretprobe trampoline, but must still include the correct
	 * return address of the target function.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	/* Scan the captured trace for the recorded return address. */
	for (i = 0; i < ret; i++) {
		if (stack_buf[i] == target_return_address[1])
			break;
	}
	/* i == ret means the address was never found in the trace. */
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/*
	 * Test stacktrace from pt_regs at the return address. Thus the stack
	 * trace must start from the target return address.
	 */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
#endif
	return 0;
}
  226. static struct kretprobe rp3 = {
  227. .handler = stacktrace_return_handler,
  228. .kp.symbol_name = "kprobe_stacktrace_target"
  229. };
  230. static void test_stacktrace_on_kretprobe(struct kunit *test)
  231. {
  232. unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
  233. current_test = test;
  234. rp3.kp.addr = NULL;
  235. rp3.kp.flags = 0;
  236. /*
  237. * Run the stacktrace_driver() to record correct return address in
  238. * stacktrace_target() and ensure stacktrace_driver() call is not
  239. * inlined by checking the return address of stacktrace_driver()
  240. * and the return address of this function is different.
  241. */
  242. KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
  243. KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
  244. KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
  245. unregister_kretprobe(&rp3);
  246. }
/*
 * Return handler for rp4 on kprobe_stacktrace_internal_target(): verify a
 * stack trace taken while two kretprobes are nested, i.e. with trampoline
 * addresses replacing both recorded return addresses on the stack.
 */
static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);

	/*
	 * Test stacktrace inside the kretprobe handler for the nested case.
	 * The unwinder will find the kretprobe_trampoline address on the
	 * return address, and kretprobe must solve that.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	/* The two recorded return addresses must appear adjacently. */
	for (i = 0; i < ret - 1; i++) {
		if (stack_buf[i] == target_return_address[0]) {
			KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
			break;
		}
	}
	/*
	 * NOTE(review): the loop bound is ret - 1, so on a miss i ends at
	 * ret - 1 and this NE(i, ret) expectation can never fail; checking
	 * against ret - 1 would make the "address found" assertion real —
	 * confirm before changing.
	 */
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/* Ditto for the regs version. */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
	KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
#endif
	return 0;
}
  276. static struct kretprobe rp4 = {
  277. .handler = stacktrace_internal_return_handler,
  278. .kp.symbol_name = "kprobe_stacktrace_internal_target"
  279. };
  280. static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
  281. {
  282. unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
  283. struct kretprobe *rps[2] = {&rp3, &rp4};
  284. current_test = test;
  285. rp3.kp.addr = NULL;
  286. rp3.kp.flags = 0;
  287. //KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
  288. KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
  289. KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
  290. unregister_kretprobes(rps, 2);
  291. }
  292. #endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
  293. #endif /* CONFIG_KRETPROBES */
/*
 * Per-test setup: wire up the indirect call pointers and pick a random
 * input value.
 */
static int kprobes_test_init(struct kunit *test)
{
	/* Route calls through pointers so the probed targets stay out of line. */
	target = kprobe_target;
	target2 = kprobe_target2;
	recursed_target = kprobe_recursed_target;
	stacktrace_target = kprobe_stacktrace_target;
	internal_target = kprobe_stacktrace_internal_target;
	stacktrace_driver = kprobe_stacktrace_driver;
	/* rand1 > div_factor guarantees every derived value is nonzero. */
	rand1 = get_random_u32_above(div_factor);
	return 0;
}
/*
 * Test case list; the kretprobe cases depend on CONFIG_KRETPROBES and the
 * stacktrace cases additionally on reliable stack traces from kretprobes.
 */
static struct kunit_case kprobes_testcases[] = {
	KUNIT_CASE(test_kprobe),
	KUNIT_CASE(test_kprobes),
	KUNIT_CASE(test_kprobe_missed),
#ifdef CONFIG_KRETPROBES
	KUNIT_CASE(test_kretprobe),
	KUNIT_CASE(test_kretprobes),
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
	KUNIT_CASE(test_stacktrace_on_kretprobe),
	KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
#endif
#endif
	{}
};
/* Suite definition; kprobes_test_init() runs before every test case. */
static struct kunit_suite kprobes_test_suite = {
	.name = "kprobes_test",
	.init = kprobes_test_init,
	.test_cases = kprobes_testcases,
};
/* Register the suite with KUnit and export the module metadata. */
kunit_test_suites(&kprobes_test_suite);

MODULE_DESCRIPTION("simple sanity test for k*probes");
MODULE_LICENSE("GPL");