trace_selftest.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Include in trace.c */
  3. #include <uapi/linux/sched/types.h>
  4. #include <linux/stringify.h>
  5. #include <linux/kthread.h>
  6. #include <linux/delay.h>
  7. #include <linux/slab.h>
  8. static inline int trace_valid_entry(struct trace_entry *entry)
  9. {
  10. switch (entry->type) {
  11. case TRACE_FN:
  12. case TRACE_CTX:
  13. case TRACE_WAKE:
  14. case TRACE_STACK:
  15. case TRACE_PRINT:
  16. case TRACE_BRANCH:
  17. case TRACE_GRAPH_ENT:
  18. case TRACE_GRAPH_RET:
  19. return 1;
  20. }
  21. return 0;
  22. }
  23. static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
  24. {
  25. struct ring_buffer_event *event;
  26. struct trace_entry *entry;
  27. unsigned int loops = 0;
  28. while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  29. entry = ring_buffer_event_data(event);
  30. /*
  31. * The ring buffer is a size of trace_buf_size, if
  32. * we loop more than the size, there's something wrong
  33. * with the ring buffer.
  34. */
  35. if (loops++ > trace_buf_size) {
  36. printk(KERN_CONT ".. bad ring buffer ");
  37. goto failed;
  38. }
  39. if (!trace_valid_entry(entry)) {
  40. printk(KERN_CONT ".. invalid entry %d ",
  41. entry->type);
  42. goto failed;
  43. }
  44. }
  45. return 0;
  46. failed:
  47. /* disable tracing */
  48. tracing_disabled = 1;
  49. printk(KERN_CONT ".. corrupted trace buffer .. ");
  50. return -1;
  51. }
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 *
 * On return, *@count (if non-NULL) holds the number of entries that
 * were in the buffer before it was drained.  Returns 0 on success,
 * -1 if any per-cpu buffer was found corrupted.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	/* Snapshot the entry count before draining the buffer */
	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	/* Report the pre-drain count even when the check failed */
	if (count)
		*count = cnt;

	return ret;
}
/* Common warning for a tracer whose init() callback returned an error */
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
  89. #ifdef CONFIG_FUNCTION_TRACER
  90. #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Counters bumped by the ftrace probe callbacks below.  The dynamic
 * ops selftest (trace_selftest_ops()) checks these against the exact
 * number of times each filtered function was invoked.
 */
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

/* Callback installed via ftrace_init_array_ops() on the trace array */
static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

/* Callback for the dynamically allocated ftrace_ops */
static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};
/* Dump all probe counters in one line, for diagnosing a failed run */
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

/* Zero all probe counters before (and after) a test run */
static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
/*
 * Exercise multiple ftrace_ops filtering on the two selftest stub
 * functions, verifying that each registered ops is called exactly for
 * the functions it filtered on — including a dynamically allocated
 * ops and (when @cnt > 1) the trace array's own ops.
 *
 * Returns 0 on success, -1 on any miscount.  The expected counter
 * values below are cumulative across the sequence of stub calls, so
 * the order of checks must not be changed.
 */
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	/* Only probes 1 and 3 filter on function 1 */
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	/* Now probes 2 and 3 should have fired on function 2 */
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	/* The dynamic ops has no filter, so it traces everything */
	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	/* Probe 3 must no longer see function 1 (count stays at 4) */
	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	/* After unregistering, no callback may fire at all */
	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/*
	 * We should have nothing in the buffer: the stub was called
	 * BEFORE the filter and tracer were enabled.
	 */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off (skipped if already failed) */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
/* Shared by both recursion callbacks below; reset between the two tests */
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	/* Calling the traced function again would recurse without protection */
	DYN_FTRACE_TEST_NAME();
}
static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct ftrace_regs *fregs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}
/* Relies on ftrace core for recursion protection */
static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
	.flags			= FTRACE_OPS_FL_RECURSION,
};

/* Handles its own recursion (no RECURSION flag) */
static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
};
/*
 * Verify ftrace's recursion handling: first that the core prevents an
 * unprotected callback from recursing, then that a callback providing
 * its own protection is allowed to recurse exactly once.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");


	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between context,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	/* Seed to 1 so the safe callback's single recursion yields 2 */
	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
  467. #else
  468. # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
  469. # define trace_selftest_function_recursion() ({ 0; })
  470. #endif /* CONFIG_DYNAMIC_FTRACE */
/* Records whether the regs callback fired, and whether it saw pt_regs */
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	/* Note only whether regs were supplied; validation happens later */
	if (regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_SAVE_REGS,
};
/*
 * Verify FTRACE_OPS_FL_SAVE_REGS behavior: on arches with register
 * saving support the callback must receive pt_regs; without support,
 * registering a SAVE_REGS ops must fail, and the IF_SUPPORTED variant
 * must succeed but deliver NULL regs.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		/* Fall back to "save regs only if the arch can" */
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.  On success, chains into the
 * dynamic-tracing, recursion, and save-regs selftests.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* A boot-time filter would invalidate the expected counts */
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
  616. #endif /* CONFIG_FUNCTION_TRACER */
  617. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  618. #ifdef CONFIG_DYNAMIC_FTRACE
/* Magic values written at fgraph entry and verified at return, per size */
#define CHAR_NUMBER		123
#define SHORT_NUMBER		12345
#define WORD_NUMBER		1234567890
#define LONG_NUMBER		1234567890123456789LL
#define ERRSTR_BUFLEN		128

/* State shared between the fgraph storage entry/return handlers */
struct fgraph_fixture {
	struct fgraph_ops	gops;		/* graph ops registered for the test */
	int			store_size;	/* bytes reserved per traced event */
	const char		*store_type_name; /* size name used in error messages */
	char			error_str_buf[ERRSTR_BUFLEN];
	char			*error_str;	/* NULL on success, else points into buf */
};
  631. static __init int store_entry(struct ftrace_graph_ent *trace,
  632. struct fgraph_ops *gops)
  633. {
  634. struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
  635. const char *type = fixture->store_type_name;
  636. int size = fixture->store_size;
  637. void *p;
  638. p = fgraph_reserve_data(gops->idx, size);
  639. if (!p) {
  640. snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
  641. "Failed to reserve %s\n", type);
  642. return 0;
  643. }
  644. switch (size) {
  645. case 1:
  646. *(char *)p = CHAR_NUMBER;
  647. break;
  648. case 2:
  649. *(short *)p = SHORT_NUMBER;
  650. break;
  651. case 4:
  652. *(int *)p = WORD_NUMBER;
  653. break;
  654. case 8:
  655. *(long long *)p = LONG_NUMBER;
  656. break;
  657. }
  658. return 1;
  659. }
/*
 * fgraph return handler: fetch the data stashed by store_entry() and
 * check both its size and value.  Clears fixture->error_str only when
 * everything matches; any mismatch leaves a message in the fixture.
 */
static __init void store_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	long long expect = 0;
	long long found = -1;
	int size;
	char *p;

	p = fgraph_retrieve_data(gops->idx, &size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to retrieve %s\n", type);
		return;
	}
	if (fixture->store_size > size) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Retrieved size %d is smaller than expected %d\n",
			 size, (int)fixture->store_size);
		return;
	}

	/* Widen the stored value to long long for a single comparison */
	switch (fixture->store_size) {
	case 1:
		expect = CHAR_NUMBER;
		found = *(char *)p;
		break;
	case 2:
		expect = SHORT_NUMBER;
		found = *(short *)p;
		break;
	case 4:
		expect = WORD_NUMBER;
		found = *(int *)p;
		break;
	case 8:
		expect = LONG_NUMBER;
		found = *(long long *)p;
		break;
	}

	if (found != expect) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "%s returned not %lld but %lld\n", type, expect, found);
		return;
	}
	/* Success: clear the preloaded error string */
	fixture->error_str = NULL;
}
/*
 * Prepare a fixture for one storage test: preload the error string
 * (cleared by store_return() only on full success) and restrict the
 * fixture's ops to the selftest stub function.
 *
 * Returns the ftrace_set_filter() result.
 */
static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
{
	char *func_name;
	int len;

	snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
		 "Failed to execute storage %s\n", fixture->store_type_name);
	fixture->error_str = fixture->error_str_buf;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
}
/* Test fgraph storage for each size */
static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
{
	int size = fixture->store_size;
	int ret;

	pr_cont("PASSED\n");
	pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));

	ret = init_fgraph_fixture(fixture);
	/* -ENODEV just means !DYNAMIC_FTRACE: trace all functions instead */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		return -1;
	}

	ret = register_ftrace_graph(&fixture->gops);
	if (ret) {
		pr_warn("Failed to init store_bytes fgraph tracing\n");
		return -1;
	}

	/* Trigger one entry/return pair through the storage handlers */
	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_graph(&fixture->gops);

	/* store_return() clears error_str only when the data checked out */
	if (fixture->error_str) {
		pr_cont("*** %s ***", fixture->error_str);
		return -1;
	}

	return 0;
}
/* One fixture per supported storage size: 1, 2, 4 and 8 bytes */
static struct fgraph_fixture store_bytes[4] __initdata = {
	[0] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 1,
		.store_type_name = "byte",
	},
	[1] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 2,
		.store_type_name = "short",
	},
	[2] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 4,
		.store_type_name = "word",
	},
	[3] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 8,
		.store_type_name = "long long",
	},
};
/*
 * Register all storage fixtures on the same function at once and make
 * sure each one still round-trips its data independently.
 *
 * Unwinding is two-stage: out1 unregisters the graph ops registered so
 * far (index j), out2 frees the filters set so far (index i).  Returns
 * 0 on success, -1 if any error was reported.
 */
static __init int test_graph_storage_multi(void)
{
	struct fgraph_fixture *fixture;
	bool printed = false;
	int i, j, ret;

	pr_cont("PASSED\n");
	pr_info("Testing multiple fgraph storage on a function: ");

	for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
		fixture = &store_bytes[i];
		ret = init_fgraph_fixture(fixture);
		if (ret && ret != -ENODEV) {
			pr_cont("*Could not set filter* ");
			printed = true;
			goto out2;
		}
	}

	for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
		fixture = &store_bytes[j];
		ret = register_ftrace_graph(&fixture->gops);
		if (ret) {
			pr_warn("Failed to init store_bytes fgraph tracing\n");
			printed = true;
			goto out1;
		}
	}

	/* One call exercises every registered fixture simultaneously */
	DYN_FTRACE_TEST_NAME();
out1:
	while (--j >= 0) {
		fixture = &store_bytes[j];
		unregister_ftrace_graph(&fixture->gops);

		/* Report only the first error encountered */
		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
out2:
	while (--i >= 0) {
		fixture = &store_bytes[i];
		ftrace_free_filter(&fixture->gops.ops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
	return printed ? -1 : 0;
}
  822. /* Test the storage passed across function_graph entry and return */
  823. static __init int test_graph_storage(void)
  824. {
  825. int ret;
  826. ret = test_graph_storage_single(&store_bytes[0]);
  827. if (ret)
  828. return ret;
  829. ret = test_graph_storage_single(&store_bytes[1]);
  830. if (ret)
  831. return ret;
  832. ret = test_graph_storage_single(&store_bytes[2]);
  833. if (ret)
  834. return ret;
  835. ret = test_graph_storage_single(&store_bytes[3]);
  836. if (ret)
  837. return ret;
  838. ret = test_graph_storage_multi();
  839. if (ret)
  840. return ret;
  841. return 0;
  842. }
  843. #else
  844. static inline int test_graph_storage(void) { return 0; }
  845. #endif /* CONFIG_DYNAMIC_FTRACE */
  846. /* Maximum number of functions to trace before diagnosing a hang */
  847. #define GRAPH_MAX_FUNC_TEST 100000000
  848. static unsigned int graph_hang_thresh;
  849. /* Wrap the real function entry probe to avoid possible hanging */
  850. static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
  851. struct fgraph_ops *gops)
  852. {
  853. /* This is harmlessly racy, we want to approximately detect a hang */
  854. if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
  855. ftrace_graph_stop();
  856. printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
  857. if (ftrace_dump_on_oops_enabled()) {
  858. ftrace_dump(DUMP_ALL);
  859. /* ftrace_dump() disables tracing */
  860. tracing_on();
  861. }
  862. return 0;
  863. }
  864. return trace_graph_entry(trace, gops);
  865. }
/* fgraph callbacks for the selftest: entry is routed through the watchdog */
static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/* ops used to attach a direct-call trampoline alongside the graph tracer */
static struct ftrace_ops direct;
#endif
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
/*
 * Startup selftest for the function_graph tracer: register the graph ops
 * (with a hang watchdog), verify entries were recorded, then optionally
 * check that graph tracing coexists with a direct-call trampoline, and
 * finally run the entry/return storage tests.
 * Returns 0 on success, negative on failure.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* A user-set filter makes the buffer contents unpredictable: skip */
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second to let some functions be traced */
	msleep(100);

	/*
	 * Have we just recovered from a hang? The watchdog entry probe
	 * bumps graph_hang_thresh on every traced function and trips
	 * once it exceeds GRAPH_MAX_FUNC_TEST.
	 */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer (non-zero ret means corrupted entries) */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/*
	 * These tests can take some time to run. Make sure on non PREEMPT
	 * kernels, we do not trigger the softlockup detector.
	 */
	cond_resched();

	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register direct function together with graph tracer
	 * and make sure we get graph trace.
	 */
	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
	ret = register_ftrace_direct(&direct,
				     (unsigned long)ftrace_stub_direct_tramp);
	if (ret)
		goto out;

	cond_resched();

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Trigger the filtered function so it lands in the trace buffer */
	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	/*
	 * NOTE(review): the buffer-check result in ret is overwritten by
	 * the unregister below; only the entry count survives — confirm
	 * this is intentional.
	 */
	ret = unregister_ftrace_direct(&direct,
				       (unsigned long)ftrace_stub_direct_tramp,
				       true);
	if (ret)
		goto out;

	cond_resched();

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);
#endif

	ret = test_graph_storage();

	/* Don't test dynamic tracing, the function tracer already did */
out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
  980. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  981. #ifdef CONFIG_IRQSOFF_TRACER
  982. int
  983. trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
  984. {
  985. unsigned long save_max = tr->max_latency;
  986. unsigned long count;
  987. int ret;
  988. /* start the tracing */
  989. ret = tracer_init(trace, tr);
  990. if (ret) {
  991. warn_failed_init_tracer(trace, ret);
  992. return ret;
  993. }
  994. /* reset the max latency */
  995. tr->max_latency = 0;
  996. /* disable interrupts for a bit */
  997. local_irq_disable();
  998. udelay(100);
  999. local_irq_enable();
  1000. /*
  1001. * Stop the tracer to avoid a warning subsequent
  1002. * to buffer flipping failure because tracing_stop()
  1003. * disables the tr and max buffers, making flipping impossible
  1004. * in case of parallels max irqs off latencies.
  1005. */
  1006. trace->stop(tr);
  1007. /* stop the tracing. */
  1008. tracing_stop();
  1009. /* check both trace buffers */
  1010. ret = trace_test_buffer(&tr->array_buffer, NULL);
  1011. if (!ret)
  1012. ret = trace_test_buffer(&tr->max_buffer, &count);
  1013. trace->reset(tr);
  1014. tracing_start();
  1015. if (!ret && !count) {
  1016. printk(KERN_CONT ".. no entries found ..");
  1017. ret = -1;
  1018. }
  1019. tr->max_latency = save_max;
  1020. return ret;
  1021. }
  1022. #endif /* CONFIG_IRQSOFF_TRACER */
  1023. #ifdef CONFIG_PREEMPT_TRACER
  1024. int
  1025. trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
  1026. {
  1027. unsigned long save_max = tr->max_latency;
  1028. unsigned long count;
  1029. int ret;
  1030. /*
  1031. * Now that the big kernel lock is no longer preemptible,
  1032. * and this is called with the BKL held, it will always
  1033. * fail. If preemption is already disabled, simply
  1034. * pass the test. When the BKL is removed, or becomes
  1035. * preemptible again, we will once again test this,
  1036. * so keep it in.
  1037. */
  1038. if (preempt_count()) {
  1039. printk(KERN_CONT "can not test ... force ");
  1040. return 0;
  1041. }
  1042. /* start the tracing */
  1043. ret = tracer_init(trace, tr);
  1044. if (ret) {
  1045. warn_failed_init_tracer(trace, ret);
  1046. return ret;
  1047. }
  1048. /* reset the max latency */
  1049. tr->max_latency = 0;
  1050. /* disable preemption for a bit */
  1051. preempt_disable();
  1052. udelay(100);
  1053. preempt_enable();
  1054. /*
  1055. * Stop the tracer to avoid a warning subsequent
  1056. * to buffer flipping failure because tracing_stop()
  1057. * disables the tr and max buffers, making flipping impossible
  1058. * in case of parallels max preempt off latencies.
  1059. */
  1060. trace->stop(tr);
  1061. /* stop the tracing. */
  1062. tracing_stop();
  1063. /* check both trace buffers */
  1064. ret = trace_test_buffer(&tr->array_buffer, NULL);
  1065. if (!ret)
  1066. ret = trace_test_buffer(&tr->max_buffer, &count);
  1067. trace->reset(tr);
  1068. tracing_start();
  1069. if (!ret && !count) {
  1070. printk(KERN_CONT ".. no entries found ..");
  1071. ret = -1;
  1072. }
  1073. tr->max_latency = save_max;
  1074. return ret;
  1075. }
  1076. #endif /* CONFIG_PREEMPT_TRACER */
  1077. #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Startup selftest for the combined preempt/irqsoff tracer: provoke a
 * latency twice — once releasing preemption first, once releasing
 * interrupts first — and verify both are captured in the max buffer.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	/* NOTE(review): !ret is always true here after the goto above */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
  1162. #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
  1163. #ifdef CONFIG_NOP_TRACER
  1164. int
  1165. trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
  1166. {
  1167. /* What could possibly go wrong? */
  1168. return 0;
  1169. }
  1170. #endif
  1171. #ifdef CONFIG_SCHED_TRACER
/* State shared between the wakeup selftest and its -deadline kthread */
struct wakeup_test_data {
	struct completion is_ready;	/* signals setup done, then wakeup seen */
	int go;				/* set by the test to release the thread */
};
/*
 * Kthread body for the wakeup-latency selftest: becomes a SCHED_DEADLINE
 * task, sleeps until the test sets ->go, acknowledges the wakeup, then
 * idles until kthread_stop().
 */
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Make it know we have a new prio */
	complete(&x->is_ready);

	/*
	 * Classic sleep loop: mark INTERRUPTIBLE *before* testing the
	 * condition so a wake-up between the check and schedule() is
	 * not lost.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	/* Tell the test our wakeup has been observed */
	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
  1205. int
  1206. trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
  1207. {
  1208. unsigned long save_max = tr->max_latency;
  1209. struct task_struct *p;
  1210. struct wakeup_test_data data;
  1211. unsigned long count;
  1212. int ret;
  1213. memset(&data, 0, sizeof(data));
  1214. init_completion(&data.is_ready);
  1215. /* create a -deadline thread */
  1216. p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
  1217. if (IS_ERR(p)) {
  1218. printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
  1219. return -1;
  1220. }
  1221. /* make sure the thread is running at -deadline policy */
  1222. wait_for_completion(&data.is_ready);
  1223. /* start the tracing */
  1224. ret = tracer_init(trace, tr);
  1225. if (ret) {
  1226. warn_failed_init_tracer(trace, ret);
  1227. return ret;
  1228. }
  1229. /* reset the max latency */
  1230. tr->max_latency = 0;
  1231. while (task_is_runnable(p)) {
  1232. /*
  1233. * Sleep to make sure the -deadline thread is asleep too.
  1234. * On virtual machines we can't rely on timings,
  1235. * but we want to make sure this test still works.
  1236. */
  1237. msleep(100);
  1238. }
  1239. init_completion(&data.is_ready);
  1240. data.go = 1;
  1241. /* memory barrier is in the wake_up_process() */
  1242. wake_up_process(p);
  1243. /* Wait for the task to wake up */
  1244. wait_for_completion(&data.is_ready);
  1245. /* stop the tracing. */
  1246. tracing_stop();
  1247. /* check both trace buffers */
  1248. ret = trace_test_buffer(&tr->array_buffer, NULL);
  1249. if (!ret)
  1250. ret = trace_test_buffer(&tr->max_buffer, &count);
  1251. trace->reset(tr);
  1252. tracing_start();
  1253. tr->max_latency = save_max;
  1254. /* kill the thread */
  1255. kthread_stop(p);
  1256. if (!ret && !count) {
  1257. printk(KERN_CONT ".. no entries found ..");
  1258. ret = -1;
  1259. }
  1260. return ret;
  1261. }
  1262. #endif /* CONFIG_SCHED_TRACER */
  1263. #ifdef CONFIG_BRANCH_TRACER
  1264. int
  1265. trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
  1266. {
  1267. unsigned long count;
  1268. int ret;
  1269. /* start the tracing */
  1270. ret = tracer_init(trace, tr);
  1271. if (ret) {
  1272. warn_failed_init_tracer(trace, ret);
  1273. return ret;
  1274. }
  1275. /* Sleep for a 1/10 of a second */
  1276. msleep(100);
  1277. /* stop the tracing. */
  1278. tracing_stop();
  1279. /* check the trace buffer */
  1280. ret = trace_test_buffer(&tr->array_buffer, &count);
  1281. trace->reset(tr);
  1282. tracing_start();
  1283. if (!ret && !count) {
  1284. printk(KERN_CONT ".. no entries found ..");
  1285. ret = -1;
  1286. }
  1287. return ret;
  1288. }
  1289. #endif /* CONFIG_BRANCH_TRACER */