trace_fprobe.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Fprobe-based tracing events
  4. * Copyright (C) 2022 Google LLC.
  5. */
  6. #define pr_fmt(fmt) "trace_fprobe: " fmt
  7. #include <asm/ptrace.h>
  8. #include <linux/fprobe.h>
  9. #include <linux/module.h>
  10. #include <linux/rculist.h>
  11. #include <linux/security.h>
  12. #include <linux/tracepoint.h>
  13. #include <linux/uaccess.h>
  14. #include "trace_dynevent.h"
  15. #include "trace_probe.h"
  16. #include "trace_probe_kernel.h"
  17. #include "trace_probe_tmpl.h"
  18. #define FPROBE_EVENT_SYSTEM "fprobes"
  19. #define TRACEPOINT_EVENT_SYSTEM "tracepoints"
  20. #define RETHOOK_MAXACTIVE_MAX 4096
  21. #define TRACEPOINT_STUB ERR_PTR(-ENOENT)
/* Dynamic-event interface callbacks, defined later in this file. */
static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_fprobe_release(struct dyn_event *ev);
static bool trace_fprobe_is_busy(struct dyn_event *ev);
static bool trace_fprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev);

/* Hooks fprobe events into the generic dynamic-event framework. */
static struct dyn_event_operations trace_fprobe_ops = {
	.create = trace_fprobe_create,
	.show = trace_fprobe_show,
	.is_busy = trace_fprobe_is_busy,
	.free = trace_fprobe_release,
	.match = trace_fprobe_match,
};
/*
 * Fprobe event core functions
 */

/*
 * One fprobe-based dynamic trace event.
 * @devent: links this event into the global dynamic event list.
 * @fp:     the underlying fprobe; exactly one of its entry/exit handlers
 *          is installed depending on the probe type.
 * @symbol: probed symbol or tracepoint name (kstrdup'd copy, owned here).
 * @tpoint: target tracepoint for tracepoint-type events; NULL for plain
 *          function probes, TRACEPOINT_STUB while the module is not loaded.
 * @mod:    module that owns @tpoint, if any.
 * @tp:     common trace_probe state (args, flags, event call).
 */
struct trace_fprobe {
	struct dyn_event devent;
	struct fprobe fp;
	const char *symbol;
	struct tracepoint *tpoint;
	struct module *mod;
	struct trace_probe tp;
};
/* Is this dynamic event an fprobe event? (identified by its ops vtable) */
static bool is_trace_fprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_fprobe_ops;
}

/* Convert a generic dyn_event back to its containing trace_fprobe. */
static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_fprobe, devent);
}

/**
 * for_each_trace_fprobe - iterate over the trace_fprobe list
 * @pos: the struct trace_fprobe * for each entry
 * @dpos: the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_fprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))

/* Exit (fexit) probes are the ones with an exit_handler installed. */
static bool trace_fprobe_is_return(struct trace_fprobe *tf)
{
	return tf->fp.exit_handler != NULL;
}

/*
 * Tracepoint-attached probes have a non-NULL tpoint; note that
 * TRACEPOINT_STUB (an ERR_PTR) also counts as "is a tracepoint probe".
 */
static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
{
	return tf->tpoint != NULL;
}

/* Probed symbol name for display/compare; never returns NULL. */
static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
{
	return tf->symbol ? tf->symbol : "unknown";
}
/* An event is "busy" (not removable) while any trace or perf user is on. */
static bool trace_fprobe_is_busy(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	return trace_probe_is_enabled(&tf->tp);
}
  79. static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
  80. int argc, const char **argv)
  81. {
  82. char buf[MAX_ARGSTR_LEN + 1];
  83. if (!argc)
  84. return true;
  85. snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
  86. if (strcmp(buf, argv[0]))
  87. return false;
  88. argc--; argv++;
  89. return trace_probe_match_command_args(&tf->tp, argc, argv);
  90. }
  91. static bool trace_fprobe_match(const char *system, const char *event,
  92. int argc, const char **argv, struct dyn_event *ev)
  93. {
  94. struct trace_fprobe *tf = to_trace_fprobe(ev);
  95. if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
  96. return false;
  97. if (system && strcmp(trace_probe_group_name(&tf->tp), system))
  98. return false;
  99. return trace_fprobe_match_command_head(tf, argc, argv);
  100. }
/* True once the underlying fprobe is registered with the fprobe core. */
static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
{
	return fprobe_is_registered(&tf->fp);
}
/*
 * Note that we don't verify the fetch_insn code, since it does not come
 * from user space.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;
	int ret;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
	case FETCH_OP_EDATA:
		/* Value previously saved at function entry (entry_data). */
		val = *(unsigned long *)((unsigned long)edata + code->offset);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	/* 2nd stage: dereference/convert @val and store it at @dest. */
	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
/* function entry handler */

/* Record one function-entry event into a single trace instance's buffer. */
static nokprobe_inline void
__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		    struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct fentry_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	/* Size of dynamic data (strings etc.) appended after fixed args. */
	dsize = __get_data_size(&tf->tp, regs, NULL);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = entry_ip;
	/* Probe arguments are stored immediately after the fixed header. */
	store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

/* Fan an entry event out to every trace instance linked to this probe. */
static void
fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		  struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fentry_trace_func(tf, entry_ip, regs, link->file);
}
NOKPROBE_SYMBOL(fentry_trace_func);
/* function exit handler */

/*
 * Entry-side hook used by exit probes: saves function-entry data (via
 * store_trace_entry_data()) into @entry_data so the exit event can record
 * values that are only available on entry. Returns 0 (never rejects).
 */
static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				      unsigned long ret_ip, struct pt_regs *regs,
				      void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (tf->tp.entry_arg)
		store_trace_entry_data(entry_data, &tf->tp, regs);

	return 0;
}
NOKPROBE_SYMBOL(trace_fprobe_entry_handler)
/* Record one function-exit event into a single trace instance's buffer. */
static nokprobe_inline void
__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		   unsigned long ret_ip, struct pt_regs *regs,
		   void *entry_data, struct trace_event_file *trace_file)
{
	struct fexit_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	/* @entry_data carries values captured by the entry handler. */
	dsize = __get_data_size(&tf->tp, regs, entry_data);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

/* Fan an exit event out to every trace instance linked to this probe. */
static void
fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		 unsigned long ret_ip, struct pt_regs *regs, void *entry_data)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data, link->file);
}
NOKPROBE_SYMBOL(fexit_trace_func);
#ifdef CONFIG_PERF_EVENTS

/* Record a function-entry sample to perf. Always returns 0. */
static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
			    struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fentry_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	/* Bail out fast if no perf event is attached on this CPU. */
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tf->tp, regs, NULL);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	/* Round the record up to u64 alignment, excluding the u32 size word. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = entry_ip;
	/* Zero-fill the dynamic area before storing the arguments. */
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(fentry_perf_func);
/* Record a function-exit sample to perf. */
static void
fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
		unsigned long ret_ip, struct pt_regs *regs,
		void *entry_data)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fexit_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	/* Bail out fast if no perf event is attached on this CPU. */
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tf->tp, regs, entry_data);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	/* Round the record up to u64 alignment, excluding the u32 size word. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(fexit_perf_func);
#endif /* CONFIG_PERF_EVENTS */
/* fprobe entry callback: dispatch to the ftrace and/or perf handlers. */
static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct pt_regs *regs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	int ret = 0;

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fentry_trace_func(tf, entry_ip, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		ret = fentry_perf_func(tf, entry_ip, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(fentry_dispatcher);
/* fprobe exit callback: dispatch to the ftrace and/or perf handlers. */
static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct pt_regs *regs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
#endif
}
NOKPROBE_SYMBOL(fexit_dispatcher);
  310. static void free_trace_fprobe(struct trace_fprobe *tf)
  311. {
  312. if (tf) {
  313. trace_probe_cleanup(&tf->tp);
  314. kfree(tf->symbol);
  315. kfree(tf);
  316. }
  317. }
/*
 * Allocate new trace_probe and initialize it (including fprobe).
 * Returns the new object or an ERR_PTR; on any failure the partially
 * built object is freed via free_trace_fprobe().
 */
static struct trace_fprobe *alloc_trace_fprobe(const char *group,
					       const char *event,
					       const char *symbol,
					       struct tracepoint *tpoint,
					       struct module *mod,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_fprobe *tf;
	int ret = -ENOMEM;

	/* tp.args is a trailing flexible array sized by nargs. */
	tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
	if (!tf)
		return ERR_PTR(ret);

	tf->symbol = kstrdup(symbol, GFP_KERNEL);
	if (!tf->symbol)
		goto error;

	/*
	 * Install exactly one handler; which one also encodes the probe
	 * type (see trace_fprobe_is_return()).
	 */
	if (is_return)
		tf->fp.exit_handler = fexit_dispatcher;
	else
		tf->fp.entry_handler = fentry_dispatcher;

	tf->tpoint = tpoint;
	tf->mod = mod;
	tf->fp.nr_maxactive = maxactive;

	ret = trace_probe_init(&tf->tp, event, group, false, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tf->devent, &trace_fprobe_ops);
	return tf;

error:
	free_trace_fprobe(tf);
	return ERR_PTR(ret);
}
/* Look up an fprobe event by name and group; NULL if not found. */
static struct trace_fprobe *find_trace_fprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_fprobe *tf;

	for_each_trace_fprobe(tf, pos)
		if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tf->tp), group) == 0)
			return tf;
	return NULL;
}
/* Arm the fprobe if it has been registered; no-op otherwise. Returns 0. */
static inline int __enable_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf))
		enable_fprobe(&tf->fp);

	return 0;
}

/* Disarm every registered fprobe sharing the same probe event as @tp. */
static void __disable_trace_fprobe(struct trace_probe *tp)
{
	struct trace_fprobe *tf;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		if (!trace_fprobe_is_registered(tf))
			continue;
		disable_fprobe(&tf->fp);
	}
}
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	/* Sample the state before we add our own user below. */
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (!enabled) {
		/* We are the first user: arm every sibling fprobe. */
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			/* TODO: check the fprobe is gone */
			__enable_trace_fprobe(tf);
		}
	}

	return 0;
}
/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* Keep TP_FLAG_TRACE while other files remain attached. */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	/* Only disarm the fprobes once no trace or perf user remains. */
	if (!trace_probe_is_enabled(tp))
		__disable_trace_fprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
/* Event entry printers */

/* Render one fentry record for trace output: "name: (ip) arg1=... ...". */
static enum print_line_t
print_fentry_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct fentry_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fentry_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	/* Arguments are stored immediately after the fixed header. */
	if (trace_probe_print_args(s, tp->args, tp->nr_args,
				   (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
/* Render one fexit record: "name: (ret_ip <- func) arg1=... ...". */
static enum print_line_t
print_fexit_event(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct fexit_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fexit_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	/* Print the probed function itself without a +offset suffix. */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
				   (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
/* Describe the fixed fields of an fentry event to the event core. */
static int fentry_event_define_fields(struct trace_event_call *event_call)
{
	int ret;	/* presumably consumed inside DEFINE_FIELD() — see trace_probe.h */
	struct fentry_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

/* Describe the fixed fields (func, ret_ip) of an fexit event. */
static int fexit_event_define_fields(struct trace_event_call *event_call)
{
	int ret;	/* presumably consumed inside DEFINE_FIELD() — see trace_probe.h */
	struct fexit_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
/* Output-formatting callbacks, one set per probe flavor. */
static struct trace_event_functions fentry_funcs = {
	.trace = print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
	.trace = print_fexit_event
};

/* Field layouts are defined dynamically per probe via the callbacks above. */
static struct trace_event_fields fentry_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fentry_event_define_fields },
	{}
};

static struct trace_event_fields fexit_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fexit_event_define_fields },
	{}
};
static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data);

/* Wire print/field callbacks matching the probe type (entry vs. exit). */
static inline void init_trace_event_call(struct trace_fprobe *tf)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);

	if (trace_fprobe_is_return(tf)) {
		call->event.funcs = &fexit_funcs;
		call->class->fields_array = fexit_fields_array;
	} else {
		call->event.funcs = &fentry_funcs;
		call->class->fields_array = fentry_fields_array;
	}

	call->flags = TRACE_EVENT_FL_FPROBE;
	call->class->reg = fprobe_register;
}
/* Initialize and register the trace event call for this probe. */
static int register_fprobe_event(struct trace_fprobe *tf)
{
	init_trace_event_call(tf);

	return trace_probe_register_event_call(&tf->tp);
}

/* Counterpart of register_fprobe_event(); non-zero if the event is in use. */
static int unregister_fprobe_event(struct trace_fprobe *tf)
{
	return trace_probe_unregister_event_call(&tf->tp);
}
/* NOTE(review): "regsiter" is a typo in the name, kept to avoid churning callers. */
static int __regsiter_tracepoint_fprobe(struct trace_fprobe *tf)
{
	struct tracepoint *tpoint = tf->tpoint;
	unsigned long ip = (unsigned long)tpoint->probestub;
	int ret;

	/*
	 * Here, we do 2 steps to enable fprobe on a tracepoint.
	 * At first, put __probestub_##TP function on the tracepoint
	 * and put a fprobe on the stub function.
	 */
	ret = tracepoint_probe_register_prio_may_exist(tpoint,
				tpoint->probestub, NULL, 0);
	if (ret < 0)
		return ret;
	return register_fprobe_ips(&tf->fp, &ip, 1);
}
/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
	int i, ret;

	/* Should we need new LOCKDOWN flag for fprobe? */
	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_fprobe_is_registered(tf))
		return -EINVAL;

	/* Re-resolve every argument before arming the probe. */
	for (i = 0; i < tf->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tf->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tf->tp))
		tf->fp.flags &= ~FPROBE_FL_DISABLED;
	else
		tf->fp.flags |= FPROBE_FL_DISABLED;

	if (trace_fprobe_is_tracepoint(tf)) {

		/* This tracepoint is not loaded yet */
		if (tf->tpoint == TRACEPOINT_STUB)
			return 0;

		return __regsiter_tracepoint_fprobe(tf);
	}

	/* TODO: handle filter, nofilter or symbol list */
	return register_fprobe(&tf->fp, tf->symbol, NULL);
}
/* Internal unregister function - just handle fprobe and flags */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf)) {
		unregister_fprobe(&tf->fp);
		memset(&tf->fp, 0, sizeof(tf->fp));

		/* For tracepoint probes, also detach the probestub. */
		if (trace_fprobe_is_tracepoint(tf)) {
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
}
/* TODO: make this trace_*probe common function */
/* Unregister a trace_probe and probe_event */
static int unregister_trace_fprobe(struct trace_fprobe *tf)
{
	/* If other probes are on the event, just unregister fprobe */
	if (trace_probe_has_sibling(&tf->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tf->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_fprobe_event(tf))
		return -EBUSY;

unreg:
	__unregister_trace_fprobe(tf);
	dyn_event_remove(&tf->devent);
	trace_probe_unlink(&tf->tp);

	return 0;
}
/* True if a probe on @orig's event already matches @comp (same symbol/comm). */
static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
					 struct trace_fprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	/* @orig is reused as the list cursor over all sibling probes. */
	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_fprobe_symbol(orig),
			   trace_fprobe_symbol(comp)))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
/* Attach @tf as a sibling probe of existing event @to (same name/group). */
static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to)
{
	int ret;

	/* Entry/exit and fprobe/tracepoint types cannot share one event. */
	if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
	    trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, DIFF_PROBE_TYPE);
		return -EEXIST;
	}
	ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	/* Reject an exact duplicate of an existing sibling. */
	if (trace_fprobe_has_same_fprobe(to, tf)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tf->tp, &to->tp);
	if (ret)
		return ret;

	/* Unlink again if the fprobe itself cannot be registered. */
	ret = __register_trace_fprobe(tf);
	if (ret)
		trace_probe_unlink(&tf->tp);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}
/* Register a trace_probe and probe_event */
static int register_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_fprobe *old_tf;
	int ret;

	mutex_lock(&event_mutex);

	/* An existing event with the same name/group gains a sibling probe. */
	old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
				   trace_probe_group_name(&tf->tp));
	if (old_tf) {
		ret = append_trace_fprobe(tf, old_tf);
		goto end;
	}

	/* Register new event */
	ret = register_fprobe_event(tf);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register fprobe */
	ret = __register_trace_fprobe(tf);
	if (ret < 0)
		unregister_fprobe_event(tf);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

end:
	mutex_unlock(&event_mutex);
	return ret;
}
/* Shared context for the tracepoint-search callbacks below. */
struct __find_tracepoint_cb_data {
	const char *tp_name;		/* name to search for */
	struct tracepoint *tpoint;	/* result; NULL until a match is found */
	struct module *mod;		/* owning module, if found in a module */
};
/*
 * Per-tracepoint callback for module scans: record the first name match.
 * When @data->mod was NULL (global scan), pin the owning module with
 * try_module_get(); if the module is already going away, drop the match.
 * When @data->mod was pre-set (single-module scan), no reference is taken.
 */
static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
		data->tpoint = tp;
		if (!data->mod) {
			data->mod = mod;
			if (!try_module_get(data->mod)) {
				/* Module refcount unavailable: forget the match. */
				data->tpoint = NULL;
				data->mod = NULL;
			}
		}
	}
}
  741. static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
  742. {
  743. struct __find_tracepoint_cb_data *data = priv;
  744. if (!data->tpoint && !strcmp(data->tp_name, tp->name))
  745. data->tpoint = tp;
  746. }
/*
 * Find a tracepoint from kernel and module. If the tracepoint is in a module,
 * this increments the module refcount to prevent unloading until the
 * trace_fprobe is registered to the list. After registering the trace_fprobe
 * on the trace_fprobe list, the module refcount is decremented because
 * tracepoint_probe_module_cb will handle it.
 */
static struct tracepoint *find_tracepoint(const char *tp_name,
					  struct module **tp_mod)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = NULL,
	};

	/* Built-in tracepoints first, then (if enabled) module tracepoints. */
	for_each_kernel_tracepoint(__find_tracepoint_cb, &data);

	/*
	 * NOTE(review): *tp_mod is only written on the module-search path;
	 * callers must initialize it before calling — confirm at call sites.
	 */
	if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
		for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
		*tp_mod = data.mod;
	}

	return data.tpoint;
}
  768. #ifdef CONFIG_MODULES
  769. static void reenable_trace_fprobe(struct trace_fprobe *tf)
  770. {
  771. struct trace_probe *tp = &tf->tp;
  772. list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
  773. __enable_trace_fprobe(tf);
  774. }
  775. }
/*
 * Look up @tp_name among the tracepoints of @mod only.  data.mod is
 * pre-set to @mod so that __find_tracepoint_module_cb() does not take an
 * additional module reference -- the caller (the module notifier) is
 * already operating on a pinned, live module.
 */
static struct tracepoint *find_tracepoint_in_module(struct module *mod,
						    const char *tp_name)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = mod,
	};

	for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
	return data.tpoint;
}
/*
 * Module state notifier for tracepoint-based fprobe events.
 *
 * MODULE_STATE_COMING: events created before their tracepoint existed
 * (tf->tpoint == TRACEPOINT_STUB) are bound to the now-available
 * tracepoint and, if the probe was enabled, re-armed.
 *
 * MODULE_STATE_GOING: events bound to the departing module are unhooked
 * and reverted to the unbound state so they can rebind on a later load.
 */
static int __tracepoint_probe_module_cb(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct tp_module *tp_mod = data;
	struct tracepoint *tpoint;
	struct trace_fprobe *tf;
	struct dyn_event *pos;

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	mutex_lock(&event_mutex);
	for_each_trace_fprobe(tf, pos) {
		if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
			tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
			if (tpoint) {
				tf->tpoint = tpoint;
				tf->mod = tp_mod->mod;
				/*
				 * NOTE(review): "__regsiter" looks misspelled;
				 * the name must match its definition earlier in
				 * this file, so rename both together if fixing.
				 */
				if (!WARN_ON_ONCE(__regsiter_tracepoint_fprobe(tf)) &&
				    trace_probe_is_enabled(&tf->tp))
					reenable_trace_fprobe(tf);
			}
		} else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
			/* Unhook from the vanishing module's tracepoint. */
			tracepoint_probe_unregister(tf->tpoint,
						    tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}
/* Notifier block hooking module COMING/GOING events (see above). */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = __tracepoint_probe_module_cb,
};
  819. #endif /* CONFIG_MODULES */
  820. static int parse_symbol_and_return(int argc, const char *argv[],
  821. char **symbol, bool *is_return,
  822. bool is_tracepoint)
  823. {
  824. char *tmp = strchr(argv[1], '%');
  825. int i;
  826. if (tmp) {
  827. int len = tmp - argv[1];
  828. if (!is_tracepoint && !strcmp(tmp, "%return")) {
  829. *is_return = true;
  830. } else {
  831. trace_probe_log_err(len, BAD_ADDR_SUFFIX);
  832. return -EINVAL;
  833. }
  834. *symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
  835. } else
  836. *symbol = kstrdup(argv[1], GFP_KERNEL);
  837. if (!*symbol)
  838. return -ENOMEM;
  839. if (*is_return)
  840. return 0;
  841. /* If there is $retval, this should be a return fprobe. */
  842. for (i = 2; i < argc; i++) {
  843. tmp = strstr(argv[i], "$retval");
  844. if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
  845. if (is_tracepoint) {
  846. trace_probe_log_set_index(i);
  847. trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
  848. return -EINVAL;
  849. }
  850. *is_return = true;
  851. break;
  852. }
  853. }
  854. return 0;
  855. }
/*
 * Parse one dynamic event command ("f..." or "t...") and register the
 * resulting trace_fprobe.  Returns 0 on success, -ECANCELED when the
 * command is not for this parser, or a negative errno on failure.
 */
static int __trace_fprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add fentry probe:
	 *      f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
	 *  - Add fexit probe:
	 *      f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
	 *  - Add tracepoint probe:
	 *      t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $argN	: fetch Nth argument (N:1-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_fprobe *tf = NULL;
	int i, len, new_argc = 0, ret = 0;
	bool is_return = false;
	char *symbol = NULL;
	const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
	const char **new_argv = NULL;
	int maxactive = 0;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	char sbuf[KSYM_NAME_LEN];
	char abuf[MAX_BTF_ARGS_LEN];
	char *dbuf = NULL;
	bool is_tracepoint = false;
	struct module *tp_mod = NULL;
	struct tracepoint *tpoint = NULL;
	struct traceprobe_parse_context ctx = {
		.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
	};

	/* Not our command prefix: let another dyn_event parser try it. */
	if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
		return -ECANCELED;

	if (argv[0][0] == 't') {
		is_tracepoint = true;
		group = TRACEPOINT_EVENT_SYSTEM;
	}

	trace_probe_log_init("trace_fprobe", argc, argv);

	/* 'event' points just past the ':' in "f[N]:[GRP/][EVENT]", if any. */
	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	/* Optional maxactive count "fN..." (meaningful for %return only). */
	if (isdigit(argv[0][1])) {
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/* fprobe rethook instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > RETHOOK_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(1);

	/* a symbol(or tracepoint) must be specified */
	ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
	if (ret < 0)
		goto parse_error;

	/* Only %return probes may carry a maxactive count. */
	if (!is_return && maxactive) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(1, BAD_MAXACT_TYPE);
		goto parse_error;
	}

	trace_probe_log_set_index(0);
	if (event) {
		/* Parse a user-supplied [GRP/]EVENT name. */
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	if (!event) {
		/* Make a new event name */
		if (is_tracepoint)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s%s",
				 isdigit(*symbol) ? "_" : "", symbol);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
				 is_return ? "exit" : "entry");
		sanitize_event_name(buf);
		event = buf;
	}

	if (is_return)
		ctx.flags |= TPARG_FL_RETURN;
	else
		ctx.flags |= TPARG_FL_FENTRY;

	if (is_tracepoint) {
		ctx.flags |= TPARG_FL_TPOINT;
		/* May take a module reference, dropped at 'out:' below. */
		tpoint = find_tracepoint(symbol, &tp_mod);
		if (tpoint) {
			/* Resolve the probestub symbol name for arg parsing. */
			ctx.funcname = kallsyms_lookup(
				(unsigned long)tpoint->probestub,
				NULL, NULL, NULL, sbuf);
		} else if (IS_ENABLED(CONFIG_MODULES)) {
			/* This *may* be loaded afterwards */
			tpoint = TRACEPOINT_STUB;
			ctx.funcname = symbol;
		} else {
			trace_probe_log_set_index(1);
			trace_probe_log_err(0, NO_TRACEPOINT);
			goto parse_error;
		}
	} else
		ctx.funcname = symbol;

	/* Skip the command word and symbol; the rest are fetch args. */
	argc -= 2; argv += 2;
	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
					       abuf, MAX_BTF_ARGS_LEN, &ctx);
	if (IS_ERR(new_argv)) {
		ret = PTR_ERR(new_argv);
		new_argv = NULL;
		goto out;
	}
	if (new_argv) {
		argc = new_argc;
		argv = new_argv;
	}
	if (argc > MAX_TRACE_ARGS) {
		ret = -E2BIG;
		goto out;
	}

	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
	if (ret)
		goto out;

	/* setup a probe */
	tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
				maxactive, argc, is_return);
	if (IS_ERR(tf)) {
		ret = PTR_ERR(tf);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tf is not allocated */
	}

	/* parse arguments */
	for (i = 0; i < argc; i++) {
		trace_probe_log_set_index(i + 2);
		ctx.offset = 0;
		ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	/* Return probes referencing entry data also need the entry handler. */
	if (is_return && tf->tp.entry_arg) {
		tf->fp.entry_handler = trace_fprobe_entry_handler;
		tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
	}

	ret = traceprobe_set_print_fmt(&tf->tp,
			is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
	if (ret < 0)
		goto error;

	ret = register_trace_fprobe(tf);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	/*
	 * Drop the reference taken by find_tracepoint(); once registered,
	 * the module notifier manages the tracepoint module's lifetime.
	 */
	if (tp_mod)
		module_put(tp_mod);
	traceprobe_finish_parse(&ctx);
	trace_probe_log_clear();
	kfree(new_argv);
	kfree(symbol);
	kfree(dbuf);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	/* free_trace_fprobe(NULL) is safe on the parse_error path. */
	free_trace_fprobe(tf);
	goto out;
}
/*
 * dyn_event .create handler: delegate to trace_probe_create(), which
 * tokenizes @raw_command and hands the argv to __trace_fprobe_create().
 */
static int trace_fprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_fprobe_create);
}
  1058. static int trace_fprobe_release(struct dyn_event *ev)
  1059. {
  1060. struct trace_fprobe *tf = to_trace_fprobe(ev);
  1061. int ret = unregister_trace_fprobe(tf);
  1062. if (!ret)
  1063. free_trace_fprobe(tf);
  1064. return ret;
  1065. }
  1066. static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
  1067. {
  1068. struct trace_fprobe *tf = to_trace_fprobe(ev);
  1069. int i;
  1070. if (trace_fprobe_is_tracepoint(tf))
  1071. seq_putc(m, 't');
  1072. else
  1073. seq_putc(m, 'f');
  1074. if (trace_fprobe_is_return(tf) && tf->fp.nr_maxactive)
  1075. seq_printf(m, "%d", tf->fp.nr_maxactive);
  1076. seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
  1077. trace_probe_name(&tf->tp));
  1078. seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
  1079. trace_fprobe_is_return(tf) ? "%return" : "");
  1080. for (i = 0; i < tf->tp.nr_args; i++)
  1081. seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
  1082. seq_putc(m, '\n');
  1083. return 0;
  1084. }
  1085. /*
  1086. * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
  1087. */
  1088. static int fprobe_register(struct trace_event_call *event,
  1089. enum trace_reg type, void *data)
  1090. {
  1091. struct trace_event_file *file = data;
  1092. switch (type) {
  1093. case TRACE_REG_REGISTER:
  1094. return enable_trace_fprobe(event, file);
  1095. case TRACE_REG_UNREGISTER:
  1096. return disable_trace_fprobe(event, file);
  1097. #ifdef CONFIG_PERF_EVENTS
  1098. case TRACE_REG_PERF_REGISTER:
  1099. return enable_trace_fprobe(event, NULL);
  1100. case TRACE_REG_PERF_UNREGISTER:
  1101. return disable_trace_fprobe(event, NULL);
  1102. case TRACE_REG_PERF_OPEN:
  1103. case TRACE_REG_PERF_CLOSE:
  1104. case TRACE_REG_PERF_ADD:
  1105. case TRACE_REG_PERF_DEL:
  1106. return 0;
  1107. #endif
  1108. }
  1109. return 0;
  1110. }
  1111. /*
  1112. * Register dynevent at core_initcall. This allows kernel to setup fprobe
  1113. * events in postcore_initcall without tracefs.
  1114. */
  1115. static __init int init_fprobe_trace_early(void)
  1116. {
  1117. int ret;
  1118. ret = dyn_event_register(&trace_fprobe_ops);
  1119. if (ret)
  1120. return ret;
  1121. #ifdef CONFIG_MODULES
  1122. ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
  1123. if (ret)
  1124. return ret;
  1125. #endif
  1126. return 0;
  1127. }
  1128. core_initcall(init_fprobe_trace_early);