builtin-inject.c

// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"
#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/string2.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"
#include "util/util.h"
#include "util/tsc.h"

#include <internal/lib.h>
#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/hash.h>
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <inttypes.h>

struct guest_event {
	struct perf_sample sample;
	union perf_event *event;
	char *event_buf;
};

struct guest_id {
	/* hlist_node must be first, see free_hlist() */
	struct hlist_node node;
	u64 id;
	u64 host_id;
	u32 vcpu;
};

struct guest_tid {
	/* hlist_node must be first, see free_hlist() */
	struct hlist_node node;
	/* Thread ID of QEMU thread */
	u32 tid;
	u32 vcpu;
};

struct guest_vcpu {
	/* Current host CPU */
	u32 cpu;
	/* Thread ID of QEMU thread */
	u32 tid;
};
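
/*
 * State for merging a guest perf.data file into the host event stream
 * (the --guest-data option). Guest events are first processed into a
 * temporary file, then re-read and injected into the host output in
 * timestamp order, with sample IDs, thread IDs and timestamps remapped
 * to host values.
 */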
struct guest_session {
	char *perf_data_file;
	u32 machine_pid;
	u64 time_offset;
	double time_scale;
	struct perf_tool tool;
	struct perf_data data;
	struct perf_session *session;
	char *tmp_file_name;
	int tmp_fd;
	struct perf_tsc_conversion host_tc;
	struct perf_tsc_conversion guest_tc;
	bool copy_kcore_dir;
	bool have_tc;
	bool fetched;
	bool ready;
	u16 dflt_id_hdr_size;
	u64 dflt_id;
	u64 highest_id;
	/* Array of guest_vcpu */
	struct guest_vcpu *vcpu;
	size_t vcpu_cnt;
	/* Hash table for guest_id */
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
	/* Hash table for guest_tid */
	struct hlist_head tids[PERF_EVLIST__HLIST_SIZE];
	/* Place to stash next guest event */
	struct guest_event ev;
};
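
/*
 * How build IDs are emitted into the output: either as synthesized
 * build-id events for the perf.data header, or folded into mmap2 events
 * that carry PERF_RECORD_MISC_MMAP_BUILD_ID. The LAZY variants emit a
 * build ID only the first time a sample hits the DSO; the ALL variants
 * emit one for every mapping seen.
 */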
enum build_id_rewrite_style {
	BID_RWS__NONE = 0,
	BID_RWS__INJECT_HEADER_LAZY,
	BID_RWS__INJECT_HEADER_ALL,
	BID_RWS__MMAP2_BUILDID_ALL,
	BID_RWS__MMAP2_BUILDID_LAZY,
};

struct perf_inject {
	struct perf_tool tool;
	struct perf_session *session;
	enum build_id_rewrite_style build_id_style;
	bool sched_stat;
	bool have_auxtrace;
	bool strip;
	bool jit_mode;
	bool in_place_update;
	bool in_place_update_dry_run;
	bool copy_kcore_dir;
	const char *input_name;
	struct perf_data output;
	u64 bytes_written;
	u64 aux_id;
	struct list_head samples;
	struct itrace_synth_opts itrace_synth_opts;
	char *event_copy;
	struct perf_file_section secs[HEADER_FEAT_BITS];
	struct guest_session guest_session;
	struct strlist *known_build_ids;
	const struct evsel *mmap_evsel;
};
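
/*
 * Buffered copy of the last sched_switch event per thread, used with
 * -s/--sched-stat: a later sched_stat event for the same tid is replayed
 * as that thread's stored switch event with the stat sample's time and
 * period (see perf_inject__sched_switch() and perf_inject__sched_stat()).
 */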
struct event_entry {
	struct list_head node;
	u32 tid;
	union perf_event event[];
};

static int tool__inject_build_id(const struct perf_tool *tool,
				 struct perf_sample *sample,
				 struct machine *machine,
				 const struct evsel *evsel,
				 __u16 misc,
				 const char *filename,
				 struct dso *dso, u32 flags);
static int tool__inject_mmap2_build_id(const struct perf_tool *tool,
				       struct perf_sample *sample,
				       struct machine *machine,
				       const struct evsel *evsel,
				       __u16 misc,
				       __u32 pid, __u32 tid,
				       __u64 start, __u64 len, __u64 pgoff,
				       struct dso *dso,
				       __u32 prot, __u32 flags,
				       const char *filename);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(const struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(const struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(const struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused,
					const char *str __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(const struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	/* If the output isn't a pipe then the attributes will be written as part of the header. */
	if (!inject->output.is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(const struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}
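
/*
 * PERF_RECORD_AUXTRACE events are followed by their AUX area payload.
 * When the input is a pipe, or the file is not mapped in a single mmap,
 * the payload must be streamed through copy_bytes(); otherwise the event
 * and payload are contiguous in memory and can be written in one go. For
 * file output, the event's new offset is also recorded in the auxtrace
 * index.
 */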
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	const struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, session->data,
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(const struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(const struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(const struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}
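
/*
 * Strip the PERF_SAMPLE_AUX payload out of a sample event: the bytes
 * before and after the aux data are copied into a scratch buffer, the
 * header size is shrunk accordingly, and the trailing u64 that held the
 * aux size is zeroed, leaving a valid sample without embedded trace data.
 */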
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev;

	if (inject->event_copy == NULL) {
		inject->event_copy = malloc(PERF_SAMPLE_MAX_SIZE);
		if (!inject->event_copy)
			return ERR_PTR(-ENOMEM);
	}
	ev = (union perf_event *)inject->event_copy;
	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(const struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);

static int perf_event__repipe_sample(const struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size) {
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);
		if (IS_ERR(event))
			return PTR_ERR(event);
	}

	return perf_event__repipe_synth(tool, event);
}

static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       const struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread__nsinfo(thread));

	if (vdso) {
		/*
		 * The vdso maps are always on the host and not the
		 * container. Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nsinfo__clear_need_setns(nnsi);
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		mutex_lock(dso__lock(dso));
		dso__set_nsinfo(dso, nsi);
		mutex_unlock(dso__lock(dso));
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

/*
 * The evsel used for the sample ID for mmap events. Typically stashed when
 * processing mmap events. If not stashed, search the evlist for the first mmap
 * gathering event.
 */
static const struct evsel *inject__mmap_evsel(struct perf_inject *inject)
{
	struct evsel *pos;

	if (inject->mmap_evsel)
		return inject->mmap_evsel;

	evlist__for_each_entry(inject->session->evlist, pos) {
		if (pos->core.attr.mmap) {
			inject->mmap_evsel = pos;
			return pos;
		}
	}
	pr_err("No mmap events found\n");
	return NULL;
}
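
/*
 * Common handling for PERF_RECORD_MMAP and PERF_RECORD_MMAP2: run jitdump
 * processing if requested, emit a build ID for the mapped DSO according to
 * the selected build_id_rewrite_style, and repipe the original event
 * unless a build-id-carrying replacement has already been written.
 */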
static int perf_event__repipe_common_mmap(const struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine,
					  __u32 pid, __u32 tid,
					  __u64 start, __u64 len, __u64 pgoff,
					  __u32 flags, __u32 prot,
					  const char *filename,
					  const struct dso_id *dso_id,
					  int (*perf_event_process)(const struct perf_tool *tool,
								    union perf_event *event,
								    struct perf_sample *sample,
								    struct machine *machine))
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct dso *dso = NULL;
	bool dso_sought = false;

#ifdef HAVE_JITDUMP
	if (inject->jit_mode) {
		u64 n = 0;
		int ret;

		/* If jit marker, then inject jit mmaps and generate ELF images. */
		ret = jit_process(inject->session, &inject->output, machine,
				  filename, pid, tid, &n);
		if (ret < 0)
			return ret;
		if (ret) {
			inject->bytes_written += n;
			return 0;
		}
	}
#endif
	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		dso = findnew_dso(pid, tid, filename, dso_id, machine);
		dso_sought = true;
		if (dso) {
			/* mark it not to inject build-id */
			dso__set_hit(dso);
		}
	}
	if (inject->build_id_style == BID_RWS__INJECT_HEADER_ALL) {
		if (!dso_sought) {
			dso = findnew_dso(pid, tid, filename, dso_id, machine);
			dso_sought = true;
		}

		if (dso && !dso__hit(dso)) {
			struct evsel *evsel = evlist__event2evsel(inject->session->evlist, event);

			if (evsel) {
				dso__set_hit(dso);
				tool__inject_build_id(tool, sample, machine, evsel,
						      /*misc=*/sample->cpumode,
						      filename, dso, flags);
			}
		}
	} else {
		int err;

		/*
		 * Remember the evsel for lazy build id generation. It is used
		 * for the sample id header type.
		 */
		if ((inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
		     inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) &&
		    !inject->mmap_evsel)
			inject->mmap_evsel = evlist__event2evsel(inject->session->evlist, event);

		/* Create the thread, map, etc. Not done for the unordered inject all case. */
		err = perf_event_process(tool, event, sample, machine);
		if (err) {
			dso__put(dso);
			return err;
		}
	}
	if ((inject->build_id_style == BID_RWS__MMAP2_BUILDID_ALL) &&
	    !(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
		struct evsel *evsel = evlist__event2evsel(inject->session->evlist, event);

		if (evsel && !dso_sought) {
			dso = findnew_dso(pid, tid, filename, dso_id, machine);
			dso_sought = true;
		}
		if (evsel && dso &&
		    !tool__inject_mmap2_build_id(tool, sample, machine, evsel,
						 sample->cpumode | PERF_RECORD_MISC_MMAP_BUILD_ID,
						 pid, tid, start, len, pgoff,
						 dso,
						 prot, flags,
						 filename)) {
			/* Injected mmap2 so no need to repipe. */
			dso__put(dso);
			return 0;
		}
	}
	dso__put(dso);
	if (inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY)
		return 0;

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap(const struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return perf_event__repipe_common_mmap(
		tool, event, sample, machine,
		event->mmap.pid, event->mmap.tid,
		event->mmap.start, event->mmap.len, event->mmap.pgoff,
		/*flags=*/0, PROT_EXEC,
		event->mmap.filename, /*dso_id=*/NULL,
		perf_event__process_mmap);
}

static int perf_event__repipe_mmap2(const struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	struct dso_id id;
	struct dso_id *dso_id = NULL;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
		id.maj = event->mmap2.maj;
		id.min = event->mmap2.min;
		id.ino = event->mmap2.ino;
		id.ino_generation = event->mmap2.ino_generation;
		dso_id = &id;
	}

	return perf_event__repipe_common_mmap(
		tool, event, sample, machine,
		event->mmap2.pid, event->mmap2.tid,
		event->mmap2.start, event->mmap2.len, event->mmap2.pgoff,
		event->mmap2.flags, event->mmap2.prot,
		event->mmap2.filename, dso_id,
		perf_event__process_mmap2);
}

static int perf_event__repipe_fork(const struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(const struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(const struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(const struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_LIBTRACEEVENT
static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	perf_event__repipe_synth(session->tool, event);

	return perf_event__process_tracing_data(session, event);
}
#endif

static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso__has_build_id(dso))
		return 0;

	mutex_lock(dso__lock(dso));
	nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
	if (filename__read_build_id(dso__long_name(dso), dso__bid(dso)) > 0)
		dso__set_has_build_id(dso);
	else if (dso__nsinfo(dso)) {
		char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));

		if (new_name && filename__read_build_id(new_name, dso__bid(dso)) > 0)
			dso__set_has_build_id(dso);
		free(new_name);
	}
	nsinfo__mountns_exit(&nsc);
	mutex_unlock(dso__lock(dso));

	return dso__has_build_id(dso) ? 0 : -1;
}
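
/*
 * Parse the list given via --known-build-ids. Each entry is an even-length
 * hex build ID, whitespace, then the DSO path it applies to, e.g.
 * (illustrative values only):
 *
 *	5f0085d3f513a88a54b23cc7db2c9f6faa256adb /usr/lib/libc.so.6
 *
 * Malformed entries (no DSO name, odd-length or oversized or non-hex
 * build ID) are dropped from the list.
 */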
static struct strlist *perf_inject__parse_known_build_ids(
	const char *known_build_ids_string)
{
	struct str_node *pos, *tmp;
	struct strlist *known_build_ids;
	int bid_len;

	known_build_ids = strlist__new(known_build_ids_string, NULL);
	if (known_build_ids == NULL)
		return NULL;
	strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
		const char *build_id, *dso_name;

		build_id = skip_spaces(pos->s);
		dso_name = strchr(build_id, ' ');
		if (dso_name == NULL) {
			strlist__remove(known_build_ids, pos);
			continue;
		}
		bid_len = dso_name - pos->s;
		dso_name = skip_spaces(dso_name);
		if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
			strlist__remove(known_build_ids, pos);
			continue;
		}
		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
			if (!isxdigit(build_id[2 * ix]) ||
			    !isxdigit(build_id[2 * ix + 1])) {
				strlist__remove(known_build_ids, pos);
				break;
			}
		}
	}
	return known_build_ids;
}

static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
					       struct dso *dso)
{
	struct str_node *pos;
	int bid_len;

	strlist__for_each_entry(pos, inject->known_build_ids) {
		const char *build_id, *dso_name;

		build_id = skip_spaces(pos->s);
		dso_name = strchr(build_id, ' ');
		bid_len = dso_name - pos->s;
		dso_name = skip_spaces(dso_name);
		if (strcmp(dso__long_name(dso), dso_name))
			continue;
		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
			dso__bid(dso)->data[ix] = (hex(build_id[2 * ix]) << 4 |
						   hex(build_id[2 * ix + 1]));
		}
		dso__bid(dso)->size = bid_len / 2;
		dso__set_has_build_id(dso);
		return true;
	}
	return false;
}

static int tool__inject_build_id(const struct perf_tool *tool,
				 struct perf_sample *sample,
				 struct machine *machine,
				 const struct evsel *evsel,
				 __u16 misc,
				 const char *filename,
				 struct dso *dso, u32 flags)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	int err;

	if (is_anon_memory(filename) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(filename))
		return 0;

	if (inject->known_build_ids != NULL &&
	    perf_inject__lookup_known_build_id(inject, dso))
		return 1;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", filename);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, sample, machine,
					      perf_event__repipe,
					      evsel, misc, dso__bid(dso),
					      filename);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", filename);
		return -1;
	}

	return 0;
}

static int tool__inject_mmap2_build_id(const struct perf_tool *tool,
				       struct perf_sample *sample,
				       struct machine *machine,
				       const struct evsel *evsel,
				       __u16 misc,
				       __u32 pid, __u32 tid,
				       __u64 start, __u64 len, __u64 pgoff,
				       struct dso *dso,
				       __u32 prot, __u32 flags,
				       const char *filename)
{
	int err;

	/* Return to repipe anonymous maps. */
	if (is_anon_memory(filename) || flags & MAP_HUGETLB)
		return 1;
	if (is_no_dso_memory(filename))
		return 1;

	if (dso__read_build_id(dso)) {
		pr_debug("no build_id found for %s\n", filename);
		return -1;
	}

	err = perf_event__synthesize_mmap2_build_id(tool, sample, machine,
						    perf_event__repipe,
						    evsel,
						    misc, pid, tid,
						    start, len, pgoff,
						    dso__bid(dso),
						    prot, flags,
						    filename);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", filename);
		return -1;
	}

	return 0;
}
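
/*
 * Called for each location a sample hits. For the LAZY styles this is
 * where build IDs are emitted: the first time a sample lands in a DSO (or
 * map) it is flagged as hit and a build-id or mmap2 event is synthesized
 * for it. For callchain entries the misc value is recomputed from the map,
 * since an entry may be in a different cpumode than the sample itself.
 */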
static int mark_dso_hit(const struct perf_inject *inject,
			const struct perf_tool *tool,
			struct perf_sample *sample,
			struct machine *machine,
			const struct evsel *mmap_evsel,
			struct map *map, bool sample_in_dso)
{
	struct dso *dso;
	u16 misc = sample->cpumode;

	if (!map)
		return 0;

	if (!sample_in_dso) {
		u16 guest_mask = PERF_RECORD_MISC_GUEST_KERNEL |
				 PERF_RECORD_MISC_GUEST_USER;

		if ((misc & guest_mask) != 0) {
			misc &= PERF_RECORD_MISC_HYPERVISOR;
			misc |= __map__is_kernel(map)
				? PERF_RECORD_MISC_GUEST_KERNEL
				: PERF_RECORD_MISC_GUEST_USER;
		} else {
			misc &= PERF_RECORD_MISC_HYPERVISOR;
			misc |= __map__is_kernel(map)
				? PERF_RECORD_MISC_KERNEL
				: PERF_RECORD_MISC_USER;
		}
	}
	dso = map__dso(map);
	if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY) {
		if (dso && !dso__hit(dso)) {
			dso__set_hit(dso);
			tool__inject_build_id(tool, sample, machine,
					      mmap_evsel, misc, dso__long_name(dso), dso,
					      map__flags(map));
		}
	} else if (inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
		if (!map__hit(map)) {
			const struct build_id null_bid = { .size = 0 };
			const struct build_id *bid = dso ? dso__bid(dso) : &null_bid;
			const char *filename = dso ? dso__long_name(dso) : "";

			map__set_hit(map);
			perf_event__synthesize_mmap2_build_id(tool, sample, machine,
							      perf_event__repipe,
							      mmap_evsel,
							      misc,
							      sample->pid, sample->tid,
							      map__start(map),
							      map__end(map) - map__start(map),
							      map__pgoff(map),
							      bid,
							      map__prot(map),
							      map__flags(map),
							      filename);
		}
	}
	return 0;
}

struct mark_dso_hit_args {
	const struct perf_inject *inject;
	const struct perf_tool *tool;
	struct perf_sample *sample;
	struct machine *machine;
	const struct evsel *mmap_evsel;
};

static int mark_dso_hit_callback(struct callchain_cursor_node *node, void *data)
{
	struct mark_dso_hit_args *args = data;
	struct map *map = node->ms.map;

	return mark_dso_hit(args->inject, args->tool, args->sample, args->machine,
			    args->mmap_evsel, map, /*sample_in_dso=*/false);
}

int perf_event__inject_buildid(const struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct mark_dso_hit_args args = {
		.inject = inject,
		.tool = tool,
		/*
		 * Use the parsed sample data of the sample event, which will
		 * have a later timestamp than the mmap event.
		 */
		.sample = sample,
		.machine = machine,
		.mmap_evsel = inject__mmap_evsel(inject),
	};

	addr_location__init(&al);
	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		mark_dso_hit(inject, tool, sample, machine, args.mmap_evsel, al.map,
			     /*sample_in_dso=*/true);
	}

	sample__for_each_callchain_node(thread, evsel, sample, PERF_MAX_STACK_DEPTH,
					/*symbols=*/false, mark_dso_hit_callback, &args);

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	addr_location__exit(&al);
	return 0;
}

static int perf_inject__sched_process_exit(const struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(const struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!\n");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

#ifdef HAVE_LIBTRACEEVENT
static int perf_inject__sched_stat(const struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
#endif

static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
{
	if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
		return NULL;
	return &gs->vcpu[vcpu];
}

static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
{
	ssize_t ret = writen(gs->tmp_fd, buf, sz);

	return ret < 0 ? ret : 0;
}

static int guest_session__repipe(const struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct guest_session *gs = container_of(tool, struct guest_session, tool);

	return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
{
	struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
	int hash;

	if (!guest_tid)
		return -ENOMEM;

	guest_tid->tid = tid;
	guest_tid->vcpu = vcpu;
	hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&guest_tid->node, &gs->tids[hash]);

	return 0;
}

static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
				 union perf_event *event,
				 u64 offset __maybe_unused, void *data)
{
	struct guest_session *gs = data;
	unsigned int vcpu;
	struct guest_vcpu *guest_vcpu;
	int ret;

	if (event->header.type != PERF_RECORD_COMM ||
	    event->comm.pid != gs->machine_pid)
		return 0;

	/*
	 * The QEMU option '-name debug-threads=on' causes thread names to be
	 * formatted as below, although it is not an ABI. libvirt also seems
	 * to use this by default. Here we rely on it to tell us which thread
	 * is which VCPU.
	 */
	ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
	if (ret <= 0)
		return ret;
	pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
		 event->comm.tid, event->comm.comm, vcpu);
	if (vcpu > INT_MAX) {
		pr_err("Invalid VCPU %u\n", vcpu);
		return -EINVAL;
	}
	guest_vcpu = guest_session__vcpu(gs, vcpu);
	if (!guest_vcpu)
		return -ENOMEM;
	if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
		pr_err("Fatal error: Two threads found with the same VCPU\n");
		return -EINVAL;
	}
	guest_vcpu->tid = event->comm.tid;

	return guest_session__map_tid(gs, event->comm.tid, vcpu);
}

static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
{
	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 host_peek_vm_comms_cb, gs);
}

static bool evlist__is_id_used(struct evlist *evlist, u64 id)
{
	return evlist__id2sid(evlist, id);
}

static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
{
	do {
		gs->highest_id += 1;
	} while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));
	return gs->highest_id;
}
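
/*
 * Guest sample IDs can collide with host sample IDs, so every guest ID
 * that will appear in the host stream is remapped to a fresh ID above the
 * highest one already in use, with the mapping kept in the guest_id hash
 * table.
 */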
static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
{
	struct guest_id *guest_id = zalloc(sizeof(*guest_id));
	int hash;

	if (!guest_id)
		return -ENOMEM;

	guest_id->id = id;
	guest_id->host_id = host_id;
	guest_id->vcpu = vcpu;
	hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&guest_id->node, &gs->heads[hash]);

	return 0;
}

static u64 evlist__find_highest_id(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 highest_id = 1;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			u64 id = evsel->core.id[j];

			if (id > highest_id)
				highest_id = id;
		}
	}

	return highest_id;
}

static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
{
	struct evlist *evlist = gs->session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			struct perf_sample_id *sid;
			u64 host_id;
			u64 id;

			id = evsel->core.id[j];
			sid = evlist__id2sid(evlist, id);
			if (!sid || sid->cpu.cpu == -1)
				continue;
			host_id = guest_session__allocate_new_id(gs, host_evlist);
			ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
{
	struct hlist_head *head;
	struct guest_id *guest_id;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &gs->heads[hash];

	hlist_for_each_entry(guest_id, head, node)
		if (guest_id->id == id)
			return guest_id;

	return NULL;
}

static int process_attr(const struct perf_tool *tool, union perf_event *event,
			struct perf_sample *sample __maybe_unused,
			struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	return perf_event__process_attr(tool, event, &inject->session->evlist);
}

static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	struct perf_event_attr attr = evsel->core.attr;
	u64 *id_array;
	u32 *vcpu_array;
	int ret = -ENOMEM;
	u32 i;

	id_array = calloc(evsel->core.ids, sizeof(*id_array));
	if (!id_array)
		return -ENOMEM;

	vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
	if (!vcpu_array)
		goto out;

	for (i = 0; i < evsel->core.ids; i++) {
		u64 id = evsel->core.id[i];
		struct guest_id *guest_id = guest_session__lookup_id(gs, id);

		if (!guest_id) {
			pr_err("Failed to find guest id %"PRIu64"\n", id);
			ret = -EINVAL;
			goto out;
		}
		id_array[i] = guest_id->host_id;
		vcpu_array[i] = guest_id->vcpu;
	}

	attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
	attr.exclude_host = 1;
	attr.exclude_guest = 0;

	ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
					  id_array, process_attr);
	if (ret)
		pr_err("Failed to add guest attr.\n");

	for (i = 0; i < evsel->core.ids; i++) {
		struct perf_sample_id *sid;
		u32 vcpu = vcpu_array[i];

		sid = evlist__id2sid(inject->session->evlist, id_array[i]);
		/* Guest event is per-thread from the host point of view */
		sid->cpu.cpu = -1;
		sid->tid = gs->vcpu[vcpu].tid;
		sid->machine_pid = gs->machine_pid;
		sid->vcpu.cpu = vcpu;
	}
out:
	free(vcpu_array);
	free(id_array);
	return ret;
}

static int guest_session__add_attrs(struct guest_session *gs)
{
	struct evlist *evlist = gs->session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = guest_session__add_attr(gs, evsel);
		if (ret)
			return ret;
	}

	return 0;
}

static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
{
	struct perf_session *session = inject->session;
	struct evlist *evlist = session->evlist;
	struct machine *machine = &session->machines.host;
	size_t from = evlist->core.nr_entries - new_cnt;

	return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
						 evlist, machine, from);
}

static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
{
	struct hlist_head *head;
	struct guest_tid *guest_tid;
	int hash;

	hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
	head = &gs->tids[hash];

	hlist_for_each_entry(guest_tid, head, node)
		if (guest_tid->tid == tid)
			return guest_tid;

	return NULL;
}

static bool dso__is_in_kernel_space(struct dso *dso)
{
	if (dso__is_vdso(dso))
		return false;

	return dso__is_kcore(dso) ||
	       dso__kernel(dso) ||
	       is_kernel_module(dso__long_name(dso), PERF_RECORD_MISC_CPUMODE_UNKNOWN);
}

static u64 evlist__first_id(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.ids)
			return evsel->core.id[0];
	}
	return 0;
}

static int process_build_id(const struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	return perf_event__process_build_id(inject->session, event);
}

static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
{
	struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
	struct perf_sample synth_sample = {
		.pid = -1,
		.tid = -1,
		.time = -1,
		.stream_id = -1,
		.cpu = -1,
		.period = 1,
		.cpumode = dso__is_in_kernel_space(dso)
			   ? PERF_RECORD_MISC_GUEST_KERNEL
			   : PERF_RECORD_MISC_GUEST_USER,
	};

	if (!machine)
		return -ENOMEM;

	dso__set_hit(dso);

	return perf_event__synthesize_build_id(&inject->tool, &synth_sample, machine,
					       process_build_id, inject__mmap_evsel(inject),
					       /*misc=*/synth_sample.cpumode,
					       dso__bid(dso), dso__long_name(dso));
}

static int guest_session__add_build_ids_cb(struct dso *dso, void *data)
{
	struct guest_session *gs = data;
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);

	if (!dso__has_build_id(dso))
		return 0;

	return synthesize_build_id(inject, dso, gs->machine_pid);
}

static int guest_session__add_build_ids(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);

	/* Build IDs will be put in the Build ID feature section */
	perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);

	return dsos__for_each_dso(&gs->session->machines.host.dsos,
				  guest_session__add_build_ids_cb,
				  gs);
}

static int guest_session__ksymbol_event(const struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample __maybe_unused,
					struct machine *machine __maybe_unused)
{
	struct guest_session *gs = container_of(tool, struct guest_session, tool);

	/* Only support out-of-line i.e. no BPF support */
	if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
		return 0;

	return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__start(struct guest_session *gs, const char *name, bool force)
{
	char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
	struct perf_session *session;
	int ret;

	/* Only these events will be injected */
	gs->tool.mmap = guest_session__repipe;
	gs->tool.mmap2 = guest_session__repipe;
	gs->tool.comm = guest_session__repipe;
	gs->tool.fork = guest_session__repipe;
	gs->tool.exit = guest_session__repipe;
	gs->tool.lost = guest_session__repipe;
	gs->tool.context_switch = guest_session__repipe;
	gs->tool.ksymbol = guest_session__ksymbol_event;
	gs->tool.text_poke = guest_session__repipe;
	/*
	 * Processing a build ID creates a struct dso with that build ID. Later,
	 * all guest dsos are iterated and the build IDs processed into the host
	 * session where they will be output to the Build ID feature section
	 * when the perf.data file header is written.
	 */
	gs->tool.build_id = perf_event__process_build_id;
	/* Process the id index to know what VCPU an ID belongs to */
	gs->tool.id_index = perf_event__process_id_index;

	gs->tool.ordered_events = true;
	gs->tool.ordering_requires_timestamps = true;

	gs->data.path = name;
	gs->data.force = force;
	gs->data.mode = PERF_DATA_MODE_READ;

	session = perf_session__new(&gs->data, &gs->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);
	gs->session = session;

	/*
	 * Initial events have zero'd ID samples. Get default ID sample size
	 * used for removing them.
	 */
	gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;

	/* And default ID for adding back a host-compatible ID sample */
	gs->dflt_id = evlist__first_id(session->evlist);
	if (!gs->dflt_id) {
		pr_err("Guest data has no sample IDs\n");
		return -EINVAL;
	}

	/* Temporary file for guest events */
	gs->tmp_file_name = strdup(tmp_file_name);
	if (!gs->tmp_file_name)
		return -ENOMEM;

	gs->tmp_fd = mkstemp(gs->tmp_file_name);
	if (gs->tmp_fd < 0)
		return -errno;

	if (zstd_init(&gs->session->zstd_data, 0) < 0)
		pr_warning("Guest session decompression initialization failed.\n");

	/*
	 * perf does not support processing 2 sessions simultaneously, so output
	 * guest events to a temporary file.
	 */
	ret = perf_session__process_events(gs->session);
	if (ret)
		return ret;

	if (lseek(gs->tmp_fd, 0, SEEK_SET))
		return -errno;

	return 0;
}

/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
{
	struct hlist_node *pos, *n;
	size_t i;

	for (i = 0; i < hlist_sz; ++i) {
		hlist_for_each_safe(pos, n, &heads[i]) {
			hlist_del(pos);
			free(pos);
		}
	}
}

static void guest_session__exit(struct guest_session *gs)
{
	if (gs->session) {
		perf_session__delete(gs->session);
		free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
		free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
	}
	if (gs->tmp_file_name) {
		if (gs->tmp_fd >= 0)
			close(gs->tmp_fd);
		unlink(gs->tmp_file_name);
		zfree(&gs->tmp_file_name);
	}
	zfree(&gs->vcpu);
	zfree(&gs->perf_data_file);
}

static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
{
	tc->time_shift = time_conv->time_shift;
	tc->time_mult = time_conv->time_mult;
	tc->time_zero = time_conv->time_zero;
	tc->time_cycles = time_conv->time_cycles;
	tc->time_mask = time_conv->time_mask;
	tc->cap_user_time_zero = time_conv->cap_user_time_zero;
	tc->cap_user_time_short = time_conv->cap_user_time_short;
}

static void guest_session__get_tc(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);

	get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
	get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
}
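
/*
 * Map a guest perf time to host perf time. In effect:
 *
 *	host_time = tsc_to_perf_time((perf_time_to_tsc(guest_time, guest_tc)
 *				      - time_offset) / time_scale, host_tc)
 *
 * where either TSC conversion is skipped if that side did not advertise
 * cap_user_time_zero.
 */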
static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
{
	u64 tsc;

	if (!guest_time) {
		*host_time = 0;
		return;
	}

	if (gs->guest_tc.cap_user_time_zero)
		tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
	else
		tsc = guest_time;

	/*
	 * This is the correct order of operations for x86 if the TSC Offset and
	 * Multiplier values are used.
	 */
	tsc -= gs->time_offset;
	tsc /= gs->time_scale;

	if (gs->host_tc.cap_user_time_zero)
		*host_time = tsc_to_perf_time(tsc, &gs->host_tc);
	else
		*host_time = tsc;
}

static int guest_session__fetch(struct guest_session *gs)
{
	void *buf;
	struct perf_event_header *hdr;
	size_t hdr_sz = sizeof(*hdr);
	ssize_t ret;

	buf = gs->ev.event_buf;
	if (!buf) {
		buf = malloc(PERF_SAMPLE_MAX_SIZE);
		if (!buf)
			return -ENOMEM;
		gs->ev.event_buf = buf;
	}
	hdr = buf;
	ret = readn(gs->tmp_fd, buf, hdr_sz);
	if (ret < 0)
		return ret;

	if (!ret) {
		/* Zero size means EOF */
		hdr->size = 0;
		return 0;
	}

	buf += hdr_sz;

	ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
	if (ret < 0)
		return ret;

	gs->ev.event = (union perf_event *)gs->ev.event_buf;
	gs->ev.sample.time = 0;

	if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
		pr_err("Unexpected type fetching guest event\n");
		return 0;
	}

	ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
	if (ret) {
		pr_err("Parse failed fetching guest event\n");
		return ret;
	}

	if (!gs->have_tc) {
		guest_session__get_tc(gs);
		gs->have_tc = true;
	}

	guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);

	return 0;
}
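
/*
 * Note on the temporary-file format read above: it is simply the stream of
 * raw perf event records that guest_session__start() directed there, i.e.
 * each record is a struct perf_event_header immediately followed by
 * (header.size - sizeof(header)) bytes of payload, bounded by the
 * PERF_SAMPLE_MAX_SIZE buffer. A read of zero bytes at a record boundary
 * is the EOF marker, reported to callers as hdr->size == 0.
 */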

static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
				    const struct perf_sample *sample)
{
	struct evsel *evsel;
	void *array;
	int ret;

	evsel = evlist__id2evsel(evlist, sample->id);
	array = ev;

	if (!evsel) {
		pr_err("No evsel for id %"PRIu64"\n", sample->id);
		return -EINVAL;
	}

	array += ev->header.size;
	ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
	if (ret < 0)
		return ret;

	if (ret & 7) {
		pr_err("Bad id sample size %d\n", ret);
		return -EINVAL;
	}

	ev->header.size += ret;

	return 0;
}
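
/*
 * The ID sample is appended in place, directly after the existing event
 * payload, and header.size is grown to cover it. Event sizes must stay
 * 8-byte aligned, hence the (ret & 7) check here and the matching checks
 * in guest_session__inject_events() below.
 */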

static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	int ret;

	if (!gs->ready)
		return 0;

	while (1) {
		struct perf_sample *sample;
		struct guest_id *guest_id;
		union perf_event *ev;
		u16 id_hdr_size;
		u8 cpumode;
		u64 id;

		if (!gs->fetched) {
			ret = guest_session__fetch(gs);
			if (ret)
				return ret;
			gs->fetched = true;
		}

		ev = gs->ev.event;
		sample = &gs->ev.sample;

		if (!ev->header.size)
			return 0; /* EOF */

		if (sample->time > timestamp)
			return 0;

		/* Change cpumode to guest */
		cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
		if (cpumode & PERF_RECORD_MISC_USER)
			cpumode = PERF_RECORD_MISC_GUEST_USER;
		else
			cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
		ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
		ev->header.misc |= cpumode;

		id = sample->id;
		if (!id) {
			id = gs->dflt_id;
			id_hdr_size = gs->dflt_id_hdr_size;
		} else {
			struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);

			id_hdr_size = evsel__id_hdr_size(evsel);
		}

		if (id_hdr_size & 7) {
			pr_err("Bad id_hdr_size %u\n", id_hdr_size);
			return -EINVAL;
		}

		if (ev->header.size & 7) {
			pr_err("Bad event size %u\n", ev->header.size);
			return -EINVAL;
		}

		/* Remove guest id sample */
		ev->header.size -= id_hdr_size;

		if (ev->header.size & 7) {
			pr_err("Bad raw event size %u\n", ev->header.size);
			return -EINVAL;
		}

		guest_id = guest_session__lookup_id(gs, id);
		if (!guest_id) {
			pr_err("Guest event with unknown id %llu\n",
			       (unsigned long long)id);
			return -EINVAL;
		}

		/* Change to host ID to avoid conflicting ID values */
		sample->id = guest_id->host_id;
		sample->stream_id = guest_id->host_id;

		if (sample->cpu != (u32)-1) {
			if (sample->cpu >= gs->vcpu_cnt) {
				pr_err("Guest event with unknown VCPU %u\n",
				       sample->cpu);
				return -EINVAL;
			}
			/* Change to host CPU instead of guest VCPU */
			sample->cpu = gs->vcpu[sample->cpu].cpu;
		}

		/* New id sample with new ID and CPU */
		ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
		if (ret)
			return ret;

		if (ev->header.size & 7) {
			pr_err("Bad new event size %u\n", ev->header.size);
			return -EINVAL;
		}

		gs->fetched = false;

		ret = output_bytes(inject, ev, ev->header.size);
		if (ret)
			return ret;
	}
}
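
/*
 * This is effectively one side of a merge-by-timestamp: every buffered
 * guest event with a time no later than @timestamp is rewritten (guest
 * cpumode, host sample ID, host CPU) and emitted, while gs->fetched keeps
 * exactly one look-ahead event parked between calls so nothing is lost
 * when the next host flush point arrives.
 */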

static int guest_session__flush_events(struct guest_session *gs)
{
	return guest_session__inject_events(gs, -1);
}

static int host__repipe(const struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	int ret;

	ret = guest_session__inject_events(&inject->guest_session, sample->time);
	if (ret)
		return ret;

	return perf_event__repipe(tool, event, sample, machine);
}

static int host__finished_init(struct perf_session *session, union perf_event *event)
{
	struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
	struct guest_session *gs = &inject->guest_session;
	int ret;

	/*
	 * Peek through host COMM events to find QEMU threads and the VCPU they
	 * are running.
	 */
	ret = host_peek_vm_comms(session, gs);
	if (ret)
		return ret;

	if (!gs->vcpu_cnt) {
		pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
		return -EINVAL;
	}

	/*
	 * Allocate new (unused) host sample IDs and map them to the guest IDs.
	 */
	gs->highest_id = evlist__find_highest_id(session->evlist);
	ret = guest_session__map_ids(gs, session->evlist);
	if (ret)
		return ret;

	ret = guest_session__add_attrs(gs);
	if (ret)
		return ret;

	ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
	if (ret) {
		pr_err("Failed to synthesize id_index\n");
		return ret;
	}

	ret = guest_session__add_build_ids(gs);
	if (ret) {
		pr_err("Failed to add guest build IDs\n");
		return ret;
	}

	gs->ready = true;

	ret = guest_session__inject_events(gs, 0);
	if (ret)
		return ret;

	return perf_event__repipe_op2_synth(session, event);
}

/*
 * Obey finished-round ordering. The FINISHED_ROUND event is first processed
 * which flushes host events to file up until the last flush time. Then inject
 * guest events up to the same time. Finally write out the FINISHED_ROUND event
 * itself.
 */
static int host__finished_round(const struct perf_tool *tool,
				union perf_event *event,
				struct ordered_events *oe)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	int ret = perf_event__process_finished_round(tool, event, oe);
	u64 timestamp = ordered_events__last_flush_time(oe);

	if (ret)
		return ret;

	ret = guest_session__inject_events(&inject->guest_session, timestamp);
	if (ret)
		return ret;

	return perf_event__repipe_oe_synth(tool, event, oe);
}

static int host__context_switch(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	struct guest_session *gs = &inject->guest_session;
	u32 pid = event->context_switch.next_prev_pid;
	u32 tid = event->context_switch.next_prev_tid;
	struct guest_tid *guest_tid;
	u32 vcpu;

	if (out || pid != gs->machine_pid)
		goto out;

	guest_tid = guest_session__lookup_tid(gs, tid);
	if (!guest_tid)
		goto out;

	if (sample->cpu == (u32)-1) {
		pr_err("Switch event does not have CPU\n");
		return -EINVAL;
	}

	vcpu = guest_tid->vcpu;
	if (vcpu >= gs->vcpu_cnt)
		return -EINVAL;

	/* Guest is switching in, record which CPU the VCPU is now running on */
	gs->vcpu[vcpu].cpu = sample->cpu;
out:
	return host__repipe(tool, event, sample, machine);
}
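
/*
 * Switch-in events of the known QEMU VCPU threads are what keep the
 * VCPU-to-host-CPU mapping current, which is how guest samples can have
 * sample->cpu rewritten in guest_session__inject_events() above.
 */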

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.\n",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(const struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}
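
/*
 * Examples (illustrative argument values):
 *
 *	perf inject -i perf.data --vm-time-correlation=dry-run
 *
 * parses as a dry run with empty vm_tm_corr_args, while
 *
 *	perf inject -i perf.data --force --vm-time-correlation="123"
 *
 * passes "123" through in vm_tm_corr_args and updates perf.data in place,
 * which is why cmd_inject() below demands --force for the non-dry-run case.
 */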

static int parse_guest_data(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	struct guest_session *gs = &inject->guest_session;
	char *tok;
	char *s;

	if (unset)
		return 0;

	if (!str)
		goto bad_args;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	gs->perf_data_file = strsep(&s, ",");
	if (!gs->perf_data_file)
		goto bad_args;

	gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
	if (gs->copy_kcore_dir)
		inject->output.is_dir = true;

	tok = strsep(&s, ",");
	if (!tok)
		goto bad_args;
	gs->machine_pid = strtoul(tok, NULL, 0);
	if (!gs->machine_pid)
		goto bad_args;

	gs->time_scale = 1;

	tok = strsep(&s, ",");
	if (!tok)
		goto out;
	gs->time_offset = strtoull(tok, NULL, 0);

	tok = strsep(&s, ",");
	if (!tok)
		goto out;
	gs->time_scale = strtod(tok, NULL);
	if (!gs->time_scale)
		goto bad_args;
out:
	return 0;
bad_args:
	pr_err("--guest-data option requires guest perf.data file name, "
	       "guest machine PID, and optionally guest timestamp offset, "
	       "and guest timestamp scale factor, separated by commas.\n");
	return -1;
}
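
/*
 * Example (illustrative file names and values):
 *
 *	perf inject -i host.data -o out.data \
 *		--guest-data=guest.data,12345,0x1000,1000
 *
 * i.e. the guest perf.data file, the QEMU machine PID on the host, and
 * optionally the guest timestamp offset and scale factor used by
 * guest_session__convert_time().
 */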

static int save_section_info_cb(struct perf_file_section *section,
				struct perf_header *ph __maybe_unused,
				int feat, int fd __maybe_unused, void *data)
{
	struct perf_inject *inject = data;

	inject->secs[feat] = *section;
	return 0;
}

static int save_section_info(struct perf_inject *inject)
{
	struct perf_header *header = &inject->session->header;
	int fd = perf_data__fd(inject->session->data);

	return perf_header__process_sections(header, fd, inject, save_section_info_cb);
}

static bool keep_feat(int feat)
{
	switch (feat) {
	/* Keep original information that describes the machine or software */
	case HEADER_TRACING_DATA:
	case HEADER_HOSTNAME:
	case HEADER_OSRELEASE:
	case HEADER_VERSION:
	case HEADER_ARCH:
	case HEADER_NRCPUS:
	case HEADER_CPUDESC:
	case HEADER_CPUID:
	case HEADER_TOTAL_MEM:
	case HEADER_CPU_TOPOLOGY:
	case HEADER_NUMA_TOPOLOGY:
	case HEADER_PMU_MAPPINGS:
	case HEADER_CACHE:
	case HEADER_MEM_TOPOLOGY:
	case HEADER_CLOCKID:
	case HEADER_BPF_PROG_INFO:
	case HEADER_BPF_BTF:
	case HEADER_CPU_PMU_CAPS:
	case HEADER_CLOCK_DATA:
	case HEADER_HYBRID_TOPOLOGY:
	case HEADER_PMU_CAPS:
		return true;
	/* Information that can be updated */
	case HEADER_BUILD_ID:
	case HEADER_CMDLINE:
	case HEADER_EVENT_DESC:
	case HEADER_BRANCH_STACK:
	case HEADER_GROUP_DESC:
	case HEADER_AUXTRACE:
	case HEADER_STAT:
	case HEADER_SAMPLE_TIME:
	case HEADER_DIR_FORMAT:
	case HEADER_COMPRESSED:
	default:
		return false;
	}
}

static int read_file(int fd, u64 offs, void *buf, size_t sz)
{
	ssize_t ret = preadn(fd, buf, sz, offs);

	if (ret < 0)
		return -errno;
	if ((size_t)ret != sz)
		return -EINVAL;
	return 0;
}

static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
{
	int fd = perf_data__fd(inject->session->data);
	u64 offs = inject->secs[feat].offset;
	size_t sz = inject->secs[feat].size;
	void *buf = malloc(sz);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = read_file(fd, offs, buf, sz);
	if (ret)
		goto out_free;

	ret = fw->write(fw, buf, sz);
out_free:
	free(buf);
	return ret;
}

struct inject_fc {
	struct feat_copier fc;
	struct perf_inject *inject;
};

static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
{
	struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
	struct perf_inject *inject = inj_fc->inject;
	int ret;

	if (!inject->secs[feat].offset ||
	    !keep_feat(feat))
		return 0;

	ret = feat_copy(inject, feat, fw);
	if (ret < 0)
		return ret;

	return 1; /* Feature section copied */
}
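
/*
 * Return-value convention for the feat_copier callback, as used here: 1
 * means the feature section was copied verbatim from the input file, 0
 * means it was not (so the header writer regenerates it), and a negative
 * value is an error. keep_feat() above decides which sections are safe to
 * copy unchanged.
 */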

static int copy_kcore_dir(struct perf_inject *inject)
{
	char *cmd;
	int ret;

	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
		       inject->input_name, inject->output.path);
	if (ret < 0)
		return ret;
	pr_debug("%s\n", cmd);
	ret = system(cmd);
	free(cmd);
	return ret;
}

static int guest_session__copy_kcore_dir(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	char *cmd;
	int ret;

	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
		       gs->perf_data_file, inject->output.path, gs->machine_pid);
	if (ret < 0)
		return ret;
	pr_debug("%s\n", cmd);
	ret = system(cmd);
	free(cmd);
	return ret;
}

static int output_fd(struct perf_inject *inject)
{
	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}

static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct guest_session *gs = &inject->guest_session;
	struct perf_session *session = inject->session;
	int fd = output_fd(inject);
	u64 output_data_offset = perf_session__data_offset(session->evlist);
	/*
	 * Pipe input hasn't loaded the attributes and will handle them as
	 * events. So that the attributes don't overlap the data, write the
	 * attributes after the data.
	 */
	bool write_attrs_after_data = !inject->output.is_pipe && inject->session->data->is_pipe;

	signal(SIGINT, sig_handler);

	if (inject->build_id_style != BID_RWS__NONE || inject->sched_stat ||
	    inject->itrace_synth_opts.set) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
#ifdef HAVE_LIBTRACEEVENT
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
#endif
	}

	if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
	    inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
#ifdef HAVE_LIBTRACEEVENT
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
#endif
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.aux_output_hw_id = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	} else if (gs->perf_data_file) {
		char *name = gs->perf_data_file;

		/*
		 * Not strictly necessary, but keep these events in order wrt
		 * guest events.
		 */
		inject->tool.mmap = host__repipe;
		inject->tool.mmap2 = host__repipe;
		inject->tool.comm = host__repipe;
		inject->tool.fork = host__repipe;
		inject->tool.exit = host__repipe;
		inject->tool.lost = host__repipe;
		inject->tool.context_switch = host__repipe;
		inject->tool.ksymbol = host__repipe;
		inject->tool.text_poke = host__repipe;
		/*
		 * Once the host session has initialized, set up sample ID
		 * mapping and feed in guest attrs, build IDs and initial
		 * events.
		 */
		inject->tool.finished_init = host__finished_init;
		/* Obey finished round ordering */
		inject->tool.finished_round = host__finished_round;
		/* Keep track of which CPU a VCPU is running on */
		inject->tool.context_switch = host__context_switch;
		/*
		 * Must order events to be able to obey finished round
		 * ordering.
		 */
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Set up a separate session to process guest perf.data file */
		ret = guest_session__start(gs, name, session->data->force);
		if (ret) {
			pr_err("Failed to process %s, error %d\n", name, ret);
			return ret;
		}
		/* Allow space in the header for guest attributes */
		output_data_offset += gs->session->header.data_offset;
		output_data_offset = roundup(output_data_offset, 4096);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->output.is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (gs->session) {
		/*
		 * Remaining guest events have later timestamps. Flush them
		 * out to file.
		 */
		ret = guest_session__flush_events(gs);
		if (ret) {
			pr_err("Failed to flush guest events\n");
			return ret;
		}
	}

	if (!inject->output.is_pipe && !inject->in_place_update) {
		struct inject_fc inj_fc = {
			.fc.copy = feat_copy_cb,
			.inject = inject,
		};

		if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
		    inject->build_id_style == BID_RWS__INJECT_HEADER_ALL)
			perf_header__set_feat(&session->header, HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			perf_session__dsos_hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc,
					    write_attrs_after_data);

		if (inject->copy_kcore_dir) {
			ret = copy_kcore_dir(inject);
			if (ret) {
				pr_err("Failed to copy kcore\n");
				return ret;
			}
		}
		if (gs->copy_kcore_dir) {
			ret = guest_session__copy_kcore_dir(gs);
			if (ret) {
				pr_err("Failed to copy guest kcore\n");
				return ret;
			}
		}
	}

	return ret;
}

int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	const char *known_build_ids = NULL;
	bool build_ids = false;
	bool build_id_all = false;
	bool mmap2_build_ids = false;
	bool mmap2_build_id_all = false;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_BOOLEAN('B', "mmap2-buildids", &mmap2_build_ids,
			    "Drop unused mmap events, make others mmap2 with build IDs"),
		OPT_BOOLEAN(0, "mmap2-buildid-all", &mmap2_build_id_all,
			    "Rewrite all mmap events as mmap2 events with build IDs"),
		OPT_STRING(0, "known-build-ids", &known_build_ids,
			   "buildid path [,buildid path...]",
			   "build-ids to use for given paths"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch to determine "
			    "where and for how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
			   "file", "vmlinux pathname"),
		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
			    "don't load vmlinux even if found"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
				    "inject events from a guest perf.data file",
				    parse_guest_data),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest OS"
			   " instance has a subdir"),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
	bool ordered_events;

	if (!inject.itrace_synth_opts.set) {
		/* Disable eager loading of kernel symbols that adds overhead to perf inject. */
		symbol_conf.lazy_load_kernel_maps = true;
	}

#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (symbol__validate_sym_arguments())
		return -1;

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else {
		if (strcmp(inject.output.path, "-") && !inject.strip &&
		    has_kcore_dir(inject.input_name)) {
			inject.output.is_dir = true;
			inject.copy_kcore_dir = true;
		}
		if (perf_data__open(&inject.output)) {
			perror("failed to create output file");
			return -1;
		}
	}

	if (mmap2_build_ids)
		inject.build_id_style = BID_RWS__MMAP2_BUILDID_LAZY;
	if (mmap2_build_id_all)
		inject.build_id_style = BID_RWS__MMAP2_BUILDID_ALL;
	if (build_ids)
		inject.build_id_style = BID_RWS__INJECT_HEADER_LAZY;
	if (build_id_all)
		inject.build_id_style = BID_RWS__INJECT_HEADER_ALL;

	data.path = inject.input_name;

	ordered_events = inject.jit_mode || inject.sched_stat ||
		inject.build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
		inject.build_id_style == BID_RWS__MMAP2_BUILDID_LAZY;
	perf_tool__init(&inject.tool, ordered_events);
	inject.tool.sample = perf_event__repipe_sample;
	inject.tool.read = perf_event__repipe_sample;
	inject.tool.mmap = perf_event__repipe;
	inject.tool.mmap2 = perf_event__repipe;
	inject.tool.comm = perf_event__repipe;
	inject.tool.namespaces = perf_event__repipe;
	inject.tool.cgroup = perf_event__repipe;
	inject.tool.fork = perf_event__repipe;
	inject.tool.exit = perf_event__repipe;
	inject.tool.lost = perf_event__repipe;
	inject.tool.lost_samples = perf_event__repipe;
	inject.tool.aux = perf_event__repipe;
	inject.tool.itrace_start = perf_event__repipe;
	inject.tool.aux_output_hw_id = perf_event__repipe;
	inject.tool.context_switch = perf_event__repipe;
	inject.tool.throttle = perf_event__repipe;
	inject.tool.unthrottle = perf_event__repipe;
	inject.tool.ksymbol = perf_event__repipe;
	inject.tool.bpf = perf_event__repipe;
	inject.tool.text_poke = perf_event__repipe;
	inject.tool.attr = perf_event__repipe_attr;
	inject.tool.event_update = perf_event__repipe_event_update;
	inject.tool.tracing_data = perf_event__repipe_op2_synth;
	inject.tool.finished_round = perf_event__repipe_oe_synth;
	inject.tool.build_id = perf_event__repipe_op2_synth;
	inject.tool.id_index = perf_event__repipe_op2_synth;
	inject.tool.auxtrace_info = perf_event__repipe_op2_synth;
	inject.tool.auxtrace_error = perf_event__repipe_op2_synth;
	inject.tool.time_conv = perf_event__repipe_op2_synth;
	inject.tool.thread_map = perf_event__repipe_op2_synth;
	inject.tool.cpu_map = perf_event__repipe_op2_synth;
	inject.tool.stat_config = perf_event__repipe_op2_synth;
	inject.tool.stat = perf_event__repipe_op2_synth;
	inject.tool.stat_round = perf_event__repipe_op2_synth;
	inject.tool.feature = perf_event__repipe_op2_synth;
	inject.tool.finished_init = perf_event__repipe_op2_synth;
	inject.tool.compressed = perf_event__repipe_op4_synth;
	inject.tool.auxtrace = perf_event__repipe_auxtrace;
	inject.tool.dont_split_sample_group = true;
	inject.session = __perf_session__new(&data, &inject.tool,
					     /*trace_event_repipe=*/inject.output.is_pipe);

	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	/* Save original section info before feature bits change */
	ret = save_section_info(&inject);
	if (ret)
		goto out_delete;

	if (inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		/*
		 * If the input is already a pipe then the features and
		 * attributes don't need synthesizing, they will be present in
		 * the input.
		 */
		if (!data.is_pipe) {
			ret = perf_event__synthesize_for_pipe(&inject.tool,
							      inject.session,
							      &inject.output,
							      perf_event__repipe);
			if (ret < 0)
				goto out_delete;
		}
	}

	if (inject.build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
	    inject.build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
		/*
		 * Use timestamp ordering so that the mmap records, especially
		 * those for jitted code, are ordered correctly. We cannot
		 * generate the buildid hit list and inject the jit mmaps at
		 * the same time for now.
		 */
		inject.tool.ordering_requires_timestamps = true;
	}
	if (inject.build_id_style != BID_RWS__NONE && known_build_ids != NULL) {
		inject.known_build_ids =
			perf_inject__parse_known_build_ids(known_build_ids);

		if (inject.known_build_ids == NULL) {
			pr_err("Couldn't parse known build ids.\n");
			goto out_delete;
		}
	}

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__repipe_mmap2;
		inject.tool.mmap = perf_event__repipe_mmap;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

	guest_session__exit(&inject.guest_session);

out_delete:
	strlist__delete(inject.known_build_ids);
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	if (!inject.in_place_update)
		perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	free(inject.event_copy);
	free(inject.guest_session.ev.event_buf);
	return ret;
}
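
/*
 * Example end-to-end invocations (illustrative file names):
 *
 *	perf inject -b -i perf.data -o perf.data.new	# add build IDs
 *	perf inject -j -i perf.data -o perf.data.jit	# merge jitdump files
 *	perf inject -s -i perf.data -o perf.data.sched	# merge sched-stat data
 *
 * Each reads the input session, repipes its events with the requested
 * modifications, and writes a new perf.data file.
 */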