/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>

#include "../perf.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct mmap2_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	u32 maj;
	u32 min;
	u64 ino;
	u64 ino_generation;
	u32 prot;
	u32 flags;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];
};

struct namespaces_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 nr_namespaces;
	struct perf_ns_link_info link_info[];
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
	u32 tid, ptid;
	u64 time;
};

struct lost_event {
	struct perf_event_header header;
	u64 id;
	u64 lost;
};

struct lost_samples_event {
	struct perf_event_header header;
	u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 value;
	u64 time_enabled;
	u64 time_running;
	u64 id;
};

#define PERF_SAMPLE_MASK				\
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID |		\
	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |		\
	 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |	\
	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |		\
	 PERF_SAMPLE_IDENTIFIER)

struct throttle_event {
	struct perf_event_header header;
	u64 time;
	u64 id;
	u64 stream_id;
};

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

struct sample_event {
	struct perf_event_header header;
	u64 array[];
};

struct regs_dump {
	u64 abi;
	u64 mask;
	u64 *regs;

	/* Cached values/mask filled by first register access. */
	u64 cache_regs[PERF_REGS_MAX];
	u64 cache_mask;
};

struct stack_dump {
	u16 offset;
	u64 size;
	char *data;
};

struct sample_read_value {
	u64 value;
	u64 id;
};

struct sample_read {
	u64 time_enabled;
	u64 time_running;
	union {
		struct {
			u64 nr;
			struct sample_read_value *values;
		} group;
		struct sample_read_value one;
	};
};
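
/*
 * Illustrative sketch, not part of the original header: summing the values
 * carried by a struct sample_read. Which union member is valid depends on
 * whether PERF_FORMAT_GROUP (assumed visible via the existing includes) was
 * set in the event's read_format, which the caller must pass in; the
 * "example__" name is hypothetical.
 */
static inline u64 example__sample_read_sum(const struct sample_read *read,
					   u64 read_format)
{
	u64 i, sum = 0;

	if (read_format & PERF_FORMAT_GROUP) {
		for (i = 0; i < read->group.nr; i++)
			sum += read->group.values[i].value;
	} else {
		sum = read->one.value;
	}
	return sum;
}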

struct ip_callchain {
	u64 nr;
	u64 ips[];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 in_tx:1;
	u64 abort:1;
	u64 cycles:16;
	u64 type:4;
	u64 reserved:40;
};

struct branch_entry {
	u64 from;
	u64 to;
	struct branch_flags flags;
};

struct branch_stack {
	u64 nr;
	struct branch_entry entries[];
};
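
/*
 * Illustrative sketch, not part of the original header: walking a sampled
 * branch stack to count mispredicted branches ("example__" is hypothetical).
 */
static inline u64 example__count_mispredicted(const struct branch_stack *bs)
{
	u64 i, nr = 0;

	for (i = 0; i < bs->nr; i++)
		nr += bs->entries[i].flags.mispred;
	return nr;
}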

enum {
	PERF_IP_FLAG_BRANCH		= 1ULL << 0,
	PERF_IP_FLAG_CALL		= 1ULL << 1,
	PERF_IP_FLAG_RETURN		= 1ULL << 2,
	PERF_IP_FLAG_CONDITIONAL	= 1ULL << 3,
	PERF_IP_FLAG_SYSCALLRET		= 1ULL << 4,
	PERF_IP_FLAG_ASYNC		= 1ULL << 5,
	PERF_IP_FLAG_INTERRUPT		= 1ULL << 6,
	PERF_IP_FLAG_TX_ABORT		= 1ULL << 7,
	PERF_IP_FLAG_TRACE_BEGIN	= 1ULL << 8,
	PERF_IP_FLAG_TRACE_END		= 1ULL << 9,
	PERF_IP_FLAG_IN_TX		= 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"

#define PERF_BRANCH_MASK		(\
	PERF_IP_FLAG_BRANCH		|\
	PERF_IP_FLAG_CALL		|\
	PERF_IP_FLAG_RETURN		|\
	PERF_IP_FLAG_CONDITIONAL	|\
	PERF_IP_FLAG_SYSCALLRET		|\
	PERF_IP_FLAG_ASYNC		|\
	PERF_IP_FLAG_INTERRUPT		|\
	PERF_IP_FLAG_TX_ABORT		|\
	PERF_IP_FLAG_TRACE_BEGIN	|\
	PERF_IP_FLAG_TRACE_END)
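
/*
 * Illustrative sketch, not part of the original header: rendering a sample's
 * flags as characters, one from PERF_IP_FLAG_CHARS per set bit (bit n maps
 * to character n), in the style perf script prints them ("example__" is
 * hypothetical).
 */
static inline void example__flags_to_chars(u32 flags, char *buf, size_t len)
{
	const char *chars = PERF_IP_FLAG_CHARS;
	size_t i, pos = 0;

	if (!len)
		return;
	for (i = 0; chars[i] && pos + 1 < len; i++) {
		if (flags & (1U << i))
			buf[pos++] = chars[i];
	}
	buf[pos] = '\0';
}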

#define MAX_INSN 16

struct perf_sample {
	u64 ip;
	u32 pid, tid;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	u64 period;
	u64 weight;
	u64 transaction;
	u32 cpu;
	u32 raw_size;
	u64 data_src;
	u64 phys_addr;
	u32 flags;
	u16 insn_len;
	u8  cpumode;
	u16 misc;
	char insn[MAX_INSN];
	void *raw_data;
	struct ip_callchain *callchain;
	struct branch_stack *branch_stack;
	struct regs_dump  user_regs;
	struct regs_dump  intr_regs;
	struct stack_dump user_stack;
	struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
	(PERF_MEM_S(OP, NA)   |\
	 PERF_MEM_S(LVL, NA)  |\
	 PERF_MEM_S(SNOOP, NA)|\
	 PERF_MEM_S(LOCK, NA) |\
	 PERF_MEM_S(TLB, NA))

struct build_id_event {
	struct perf_event_header header;
	pid_t			 pid;
	u8			 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
	char			 filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
	PERF_RECORD_USER_TYPE_START	= 64,
	PERF_RECORD_HEADER_ATTR		= 64,
	PERF_RECORD_HEADER_EVENT_TYPE	= 65, /* deprecated */
	PERF_RECORD_HEADER_TRACING_DATA	= 66,
	PERF_RECORD_HEADER_BUILD_ID	= 67,
	PERF_RECORD_FINISHED_ROUND	= 68,
	PERF_RECORD_ID_INDEX		= 69,
	PERF_RECORD_AUXTRACE_INFO	= 70,
	PERF_RECORD_AUXTRACE		= 71,
	PERF_RECORD_AUXTRACE_ERROR	= 72,
	PERF_RECORD_THREAD_MAP		= 73,
	PERF_RECORD_CPU_MAP		= 74,
	PERF_RECORD_STAT_CONFIG		= 75,
	PERF_RECORD_STAT		= 76,
	PERF_RECORD_STAT_ROUND		= 77,
	PERF_RECORD_EVENT_UPDATE	= 78,
	PERF_RECORD_TIME_CONV		= 79,
	PERF_RECORD_HEADER_FEATURE	= 80,
	PERF_RECORD_HEADER_MAX
};

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH (INT_MAX + 1U)

/* Attribute config for custom synthesized events */
enum perf_synth_id {
	PERF_SYNTH_INTEL_PTWRITE,
	PERF_SYNTH_INTEL_MWAIT,
	PERF_SYNTH_INTEL_PWRE,
	PERF_SYNTH_INTEL_EXSTOP,
	PERF_SYNTH_INTEL_PWRX,
	PERF_SYNTH_INTEL_CBR,
};

/*
 * Raw data formats for synthesized events. Note that 4 bytes of padding are
 * present to match the 'size' member of PERF_SAMPLE_RAW data, which is always
 * 8-byte aligned. That means we must dereference raw_data with an offset of 4.
 * Refer to perf_sample__synth_ptr() and perf_synth__raw_data(). It also means
 * the structure sizes are 4 bytes bigger than raw_size; refer to
 * perf_synth__raw_size().
 */

struct perf_synth_intel_ptwrite {
	u32 padding;
	union {
		struct {
			u32	ip		:  1,
				reserved	: 31;
		};
		u32	flags;
	};
	u64	payload;
};

struct perf_synth_intel_mwait {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	hints		:  8,
				reserved1	: 24,
				extensions	:  2,
				reserved2	: 30;
		};
		u64	payload;
	};
};

struct perf_synth_intel_pwre {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	reserved1	:  7,
				hw		:  1,
				subcstate	:  4,
				cstate		:  4,
				reserved2	: 48;
		};
		u64	payload;
	};
};

struct perf_synth_intel_exstop {
	u32 padding;
	union {
		struct {
			u32	ip		:  1,
				reserved	: 31;
		};
		u32	flags;
	};
};

struct perf_synth_intel_pwrx {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	deepest_cstate	:  4,
				last_cstate	:  4,
				wake_reason	:  4,
				reserved1	: 52;
		};
		u64	payload;
	};
};

struct perf_synth_intel_cbr {
	u32 padding;
	union {
		struct {
			u32	cbr		:  8,
				reserved1	:  8,
				max_nonturbo	:  8,
				reserved2	:  8;
		};
		u32	flags;
	};
	u32 freq;
	u32 reserved3;
};

/*
 * raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
 * 8-byte alignment.
 */
static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
{
	return sample->raw_data - 4;
}

static inline void *perf_synth__raw_data(void *p)
{
	return p + 4;
}

#define perf_synth__raw_size(d) (sizeof(d) - 4)

#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
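
/*
 * Illustrative sketch, not part of the original header: copying a
 * synthesized Intel PT ptwrite record out of a sample's raw data using the
 * helpers above ("example__" is hypothetical).
 */
static inline int example__get_ptwrite(struct perf_sample *sample,
				       struct perf_synth_intel_ptwrite *ptw)
{
	if (perf_sample__bad_synth_size(sample, *ptw))
		return -1;	/* raw data too small for this format */
	*ptw = *(struct perf_synth_intel_ptwrite *)perf_sample__synth_ptr(sample);
	return 0;
}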

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency cannot yield the
 * total number of low level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */

struct events_stats {
	u64 total_period;
	u64 total_non_filtered_period;
	u64 total_lost;
	u64 total_lost_samples;
	u64 total_aux_lost;
	u64 total_aux_partial;
	u64 total_invalid_chains;
	u32 nr_events[PERF_RECORD_HEADER_MAX];
	u32 nr_non_filtered_samples;
	u32 nr_lost_warned;
	u32 nr_unknown_events;
	u32 nr_invalid_chains;
	u32 nr_unknown_id;
	u32 nr_unprocessable_samples;
	u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
	u32 nr_proc_map_timeout;
};
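
/*
 * Illustrative sketch, not part of the original header: accounting a
 * PERF_RECORD_LOST event into the stats as described in the comment above
 * ("example__" is hypothetical).
 */
static inline void example__account_lost(struct events_stats *stats,
					 const struct lost_event *lost)
{
	stats->nr_events[PERF_RECORD_LOST]++;	/* one more "chunk" */
	stats->total_lost += lost->lost;	/* exact number of lost events */
}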

enum {
	PERF_CPU_MAP__CPUS = 0,
	PERF_CPU_MAP__MASK = 1,
};

struct cpu_map_entries {
	u16	nr;
	u16	cpu[];
};

struct cpu_map_mask {
	u16	nr;
	u16	long_size;
	unsigned long mask[];
};

struct cpu_map_data {
	u16	type;
	char	data[];
};

struct cpu_map_event {
	struct perf_event_header	header;
	struct cpu_map_data		data;
};
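
/*
 * Illustrative sketch, not part of the original header: fetching the n-th
 * cpu from cpu_map_data when it was synthesized in the CPUS (list) format;
 * the MASK format would be decoded via struct cpu_map_mask instead
 * ("example__" is hypothetical).
 */
static inline int example__cpu_map_nth(const struct cpu_map_data *data, int n)
{
	const struct cpu_map_entries *cpus;

	if (data->type != PERF_CPU_MAP__CPUS)
		return -1;
	cpus = (const struct cpu_map_entries *)data->data;
	if (n < 0 || n >= cpus->nr)
		return -1;
	return cpus->cpu[n];
}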

struct attr_event {
	struct perf_event_header header;
	struct perf_event_attr attr;
	u64 id[];
};

enum {
	PERF_EVENT_UPDATE__UNIT  = 0,
	PERF_EVENT_UPDATE__SCALE = 1,
	PERF_EVENT_UPDATE__NAME  = 2,
	PERF_EVENT_UPDATE__CPUS  = 3,
};

struct event_update_event_cpus {
	struct cpu_map_data cpus;
};

struct event_update_event_scale {
	double scale;
};

struct event_update_event {
	struct perf_event_header header;
	u64 type;
	u64 id;
	char data[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
	u64	event_id;
	char	name[MAX_EVENT_NAME];
};

struct event_type_event {
	struct perf_event_header header;
	struct perf_trace_event_type event_type;
};

struct tracing_data_event {
	struct perf_event_header header;
	u32 size;
};

struct id_index_entry {
	u64 id;
	u64 idx;
	u64 cpu;
	u64 tid;
};

struct id_index_event {
	struct perf_event_header header;
	u64 nr;
	struct id_index_entry entries[];
};
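
/*
 * Illustrative sketch, not part of the original header: locating the entry
 * for a given sample id in a PERF_RECORD_ID_INDEX event ("example__" is
 * hypothetical).
 */
static inline struct id_index_entry *
example__id_index_find(struct id_index_event *ie, u64 id)
{
	u64 i;

	for (i = 0; i < ie->nr; i++) {
		if (ie->entries[i].id == id)
			return &ie->entries[i];
	}
	return NULL;
}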

struct auxtrace_info_event {
	struct perf_event_header header;
	u32 type;
	u32 reserved__; /* For alignment */
	u64 priv[];
};

struct auxtrace_event {
	struct perf_event_header header;
	u64 size;
	u64 offset;
	u64 reference;
	u32 idx;
	u32 tid;
	u32 cpu;
	u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
	struct perf_event_header header;
	u32 type;
	u32 code;
	u32 cpu;
	u32 pid;
	u32 tid;
	u32 reserved__; /* For alignment */
	u64 ip;
	char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
	struct perf_event_header header;
	u64	aux_offset;
	u64	aux_size;
	u64	flags;
};

struct itrace_start_event {
	struct perf_event_header header;
	u32 pid, tid;
};

struct context_switch_event {
	struct perf_event_header header;
	u32 next_prev_pid;
	u32 next_prev_tid;
};

struct thread_map_event_entry {
	u64	pid;
	char	comm[16];
};

struct thread_map_event {
	struct perf_event_header	header;
	u64				nr;
	struct thread_map_event_entry	entries[];
};

enum {
	PERF_STAT_CONFIG_TERM__AGGR_MODE	= 0,
	PERF_STAT_CONFIG_TERM__INTERVAL		= 1,
	PERF_STAT_CONFIG_TERM__SCALE		= 2,
	PERF_STAT_CONFIG_TERM__MAX		= 3,
};

struct stat_config_event_entry {
	u64	tag;
	u64	val;
};

struct stat_config_event {
	struct perf_event_header	header;
	u64				nr;
	struct stat_config_event_entry	data[];
};

struct stat_event {
	struct perf_event_header	header;

	u64	id;
	u32	cpu;
	u32	thread;

	union {
		struct {
			u64 val;
			u64 ena;
			u64 run;
		};
		u64 values[3];
	};
};
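
/*
 * Illustrative sketch, not part of the original header: scaling a counter
 * from a stat_event the way perf stat compensates for multiplexing, i.e.
 * val * enabled / running ("example__" is hypothetical).
 */
static inline u64 example__scale_count(const struct stat_event *st)
{
	if (!st->run)
		return 0;	/* counter never ran */
	return (u64)((double)st->val * st->ena / st->run);
}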

enum {
	PERF_STAT_ROUND_TYPE__INTERVAL	= 0,
	PERF_STAT_ROUND_TYPE__FINAL	= 1,
};

struct stat_round_event {
	struct perf_event_header	header;
	u64				type;
	u64				time;
};

struct time_conv_event {
	struct perf_event_header header;
	u64 time_shift;
	u64 time_mult;
	u64 time_zero;
};
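
/*
 * Illustrative sketch, not part of the original header: converting a raw
 * TSC value to perf time with the parameters carried by a
 * PERF_RECORD_TIME_CONV event, using the usual mult/shift arithmetic
 * ("example__" is hypothetical).
 */
static inline u64 example__tsc_to_time(u64 cyc, const struct time_conv_event *tc)
{
	u64 quot = cyc >> tc->time_shift;
	u64 rem  = cyc & (((u64)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}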

struct feature_event {
	struct perf_event_header	header;
	u64				feat_id;
	char				data[];
};

union perf_event {
	struct perf_event_header	header;
	struct mmap_event		mmap;
	struct mmap2_event		mmap2;
	struct comm_event		comm;
	struct namespaces_event		namespaces;
	struct fork_event		fork;
	struct lost_event		lost;
	struct lost_samples_event	lost_samples;
	struct read_event		read;
	struct throttle_event		throttle;
	struct sample_event		sample;
	struct attr_event		attr;
	struct event_update_event	event_update;
	struct event_type_event		event_type;
	struct tracing_data_event	tracing_data;
	struct build_id_event		build_id;
	struct id_index_event		id_index;
	struct auxtrace_info_event	auxtrace_info;
	struct auxtrace_event		auxtrace;
	struct auxtrace_error_event	auxtrace_error;
	struct aux_event		aux;
	struct itrace_start_event	itrace_start;
	struct context_switch_event	context_switch;
	struct thread_map_event		thread_map;
	struct cpu_map_event		cpu_map;
	struct stat_config_event	stat_config;
	struct stat_event		stat;
	struct stat_round_event		stat_round;
	struct time_conv_event		time_conv;
	struct feature_event		feat;
};

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;
struct cpu_map;
struct perf_stat_config;
struct perf_counts_values;
struct machine;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
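
/*
 * Illustrative sketch, not part of the original header: a minimal
 * perf_event__handler_t callback of the kind passed to the
 * perf_event__synthesize_*() functions below. It only dumps the event
 * header; the "example__" name is hypothetical.
 */
static inline int example__dump_event(struct perf_tool *tool,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	(void)tool; (void)sample; (void)machine;	/* unused here */
	fprintf(stderr, "event: type %u, size %u\n",
		(unsigned int)event->header.type,
		(unsigned int)event->header.size);
	return 0;	/* a non-zero return aborts synthesis */
}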

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine, bool mmap_data,
				      unsigned int proc_map_timeout);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *cpus,
				   perf_event__handler_t process,
				   struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data,
				   unsigned int proc_map_timeout,
				   unsigned int nr_threads_synthesize);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine);
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event);
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine);
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 time, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine);
int perf_event__process_namespaces(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process);
int perf_event__process(struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine);

struct addr_location;

int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine);

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout);

int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr);

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max);

void event_attr_init(struct perf_event_attr *attr);

int perf_event_paranoid(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

#endif /* __PERF_RECORD_H */