/* trace.h — KVM x86 tracepoint definitions. */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  3. #define _TRACE_KVM_H
  4. #include <linux/tracepoint.h>
  5. #include <asm/vmx.h>
  6. #include <asm/svm.h>
  7. #include <asm/clocksource.h>
  8. #include <asm/pvclock-abi.h>
  9. #undef TRACE_SYSTEM
  10. #define TRACE_SYSTEM kvm
  11. /*
  12. * Tracepoint for guest mode entry.
  13. */
  14. TRACE_EVENT(kvm_entry,
  15. TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit),
  16. TP_ARGS(vcpu, force_immediate_exit),
  17. TP_STRUCT__entry(
  18. __field( unsigned int, vcpu_id )
  19. __field( unsigned long, rip )
  20. __field( bool, immediate_exit )
  21. ),
  22. TP_fast_assign(
  23. __entry->vcpu_id = vcpu->vcpu_id;
  24. __entry->rip = kvm_rip_read(vcpu);
  25. __entry->immediate_exit = force_immediate_exit;
  26. ),
  27. TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip,
  28. __entry->immediate_exit ? "[immediate exit]" : "")
  29. );
  30. /*
  31. * Tracepoint for hypercall.
  32. */
  33. TRACE_EVENT(kvm_hypercall,
  34. TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
  35. unsigned long a2, unsigned long a3),
  36. TP_ARGS(nr, a0, a1, a2, a3),
  37. TP_STRUCT__entry(
  38. __field( unsigned long, nr )
  39. __field( unsigned long, a0 )
  40. __field( unsigned long, a1 )
  41. __field( unsigned long, a2 )
  42. __field( unsigned long, a3 )
  43. ),
  44. TP_fast_assign(
  45. __entry->nr = nr;
  46. __entry->a0 = a0;
  47. __entry->a1 = a1;
  48. __entry->a2 = a2;
  49. __entry->a3 = a3;
  50. ),
  51. TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
  52. __entry->nr, __entry->a0, __entry->a1, __entry->a2,
  53. __entry->a3)
  54. );
  55. /*
  56. * Tracepoint for hypercall.
  57. */
  58. TRACE_EVENT(kvm_hv_hypercall,
  59. TP_PROTO(__u16 code, bool fast, __u16 var_cnt, __u16 rep_cnt,
  60. __u16 rep_idx, __u64 ingpa, __u64 outgpa),
  61. TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa),
  62. TP_STRUCT__entry(
  63. __field( __u16, rep_cnt )
  64. __field( __u16, rep_idx )
  65. __field( __u64, ingpa )
  66. __field( __u64, outgpa )
  67. __field( __u16, code )
  68. __field( __u16, var_cnt )
  69. __field( bool, fast )
  70. ),
  71. TP_fast_assign(
  72. __entry->rep_cnt = rep_cnt;
  73. __entry->rep_idx = rep_idx;
  74. __entry->ingpa = ingpa;
  75. __entry->outgpa = outgpa;
  76. __entry->code = code;
  77. __entry->var_cnt = var_cnt;
  78. __entry->fast = fast;
  79. ),
  80. TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
  81. __entry->code, __entry->fast ? "fast" : "slow",
  82. __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx,
  83. __entry->ingpa, __entry->outgpa)
  84. );
  85. TRACE_EVENT(kvm_hv_hypercall_done,
  86. TP_PROTO(u64 result),
  87. TP_ARGS(result),
  88. TP_STRUCT__entry(
  89. __field(__u64, result)
  90. ),
  91. TP_fast_assign(
  92. __entry->result = result;
  93. ),
  94. TP_printk("result 0x%llx", __entry->result)
  95. );
  96. /*
  97. * Tracepoint for Xen hypercall.
  98. */
  99. TRACE_EVENT(kvm_xen_hypercall,
  100. TP_PROTO(u8 cpl, unsigned long nr,
  101. unsigned long a0, unsigned long a1, unsigned long a2,
  102. unsigned long a3, unsigned long a4, unsigned long a5),
  103. TP_ARGS(cpl, nr, a0, a1, a2, a3, a4, a5),
  104. TP_STRUCT__entry(
  105. __field(u8, cpl)
  106. __field(unsigned long, nr)
  107. __field(unsigned long, a0)
  108. __field(unsigned long, a1)
  109. __field(unsigned long, a2)
  110. __field(unsigned long, a3)
  111. __field(unsigned long, a4)
  112. __field(unsigned long, a5)
  113. ),
  114. TP_fast_assign(
  115. __entry->cpl = cpl;
  116. __entry->nr = nr;
  117. __entry->a0 = a0;
  118. __entry->a1 = a1;
  119. __entry->a2 = a2;
  120. __entry->a3 = a3;
  121. __entry->a4 = a4;
  122. __entry->a4 = a5;
  123. ),
  124. TP_printk("cpl %d nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx",
  125. __entry->cpl, __entry->nr,
  126. __entry->a0, __entry->a1, __entry->a2,
  127. __entry->a3, __entry->a4, __entry->a5)
  128. );
  129. /*
  130. * Tracepoint for PIO.
  131. */
  132. #define KVM_PIO_IN 0
  133. #define KVM_PIO_OUT 1
  134. TRACE_EVENT(kvm_pio,
  135. TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
  136. unsigned int count, const void *data),
  137. TP_ARGS(rw, port, size, count, data),
  138. TP_STRUCT__entry(
  139. __field( unsigned int, rw )
  140. __field( unsigned int, port )
  141. __field( unsigned int, size )
  142. __field( unsigned int, count )
  143. __field( unsigned int, val )
  144. ),
  145. TP_fast_assign(
  146. __entry->rw = rw;
  147. __entry->port = port;
  148. __entry->size = size;
  149. __entry->count = count;
  150. if (size == 1)
  151. __entry->val = *(unsigned char *)data;
  152. else if (size == 2)
  153. __entry->val = *(unsigned short *)data;
  154. else
  155. __entry->val = *(unsigned int *)data;
  156. ),
  157. TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
  158. __entry->rw ? "write" : "read",
  159. __entry->port, __entry->size, __entry->count, __entry->val,
  160. __entry->count > 1 ? "(...)" : "")
  161. );
  162. /*
  163. * Tracepoint for fast mmio.
  164. */
  165. TRACE_EVENT(kvm_fast_mmio,
  166. TP_PROTO(u64 gpa),
  167. TP_ARGS(gpa),
  168. TP_STRUCT__entry(
  169. __field(u64, gpa)
  170. ),
  171. TP_fast_assign(
  172. __entry->gpa = gpa;
  173. ),
  174. TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
  175. );
  176. /*
  177. * Tracepoint for cpuid.
  178. */
  179. TRACE_EVENT(kvm_cpuid,
  180. TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
  181. unsigned long rbx, unsigned long rcx, unsigned long rdx,
  182. bool found, bool used_max_basic),
  183. TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),
  184. TP_STRUCT__entry(
  185. __field( unsigned int, function )
  186. __field( unsigned int, index )
  187. __field( unsigned long, rax )
  188. __field( unsigned long, rbx )
  189. __field( unsigned long, rcx )
  190. __field( unsigned long, rdx )
  191. __field( bool, found )
  192. __field( bool, used_max_basic )
  193. ),
  194. TP_fast_assign(
  195. __entry->function = function;
  196. __entry->index = index;
  197. __entry->rax = rax;
  198. __entry->rbx = rbx;
  199. __entry->rcx = rcx;
  200. __entry->rdx = rdx;
  201. __entry->found = found;
  202. __entry->used_max_basic = used_max_basic;
  203. ),
  204. TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
  205. __entry->function, __entry->index, __entry->rax,
  206. __entry->rbx, __entry->rcx, __entry->rdx,
  207. __entry->found ? "found" : "not found",
  208. __entry->used_max_basic ? ", used max basic" : "")
  209. );
  210. #define AREG(x) { APIC_##x, "APIC_" #x }
  211. #define kvm_trace_symbol_apic \
  212. AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \
  213. AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
  214. AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
  215. AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \
  216. AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
  217. AREG(ECTRL)
  218. /*
  219. * Tracepoint for apic access.
  220. */
  221. TRACE_EVENT(kvm_apic,
  222. TP_PROTO(unsigned int rw, unsigned int reg, u64 val),
  223. TP_ARGS(rw, reg, val),
  224. TP_STRUCT__entry(
  225. __field( unsigned int, rw )
  226. __field( unsigned int, reg )
  227. __field( u64, val )
  228. ),
  229. TP_fast_assign(
  230. __entry->rw = rw;
  231. __entry->reg = reg;
  232. __entry->val = val;
  233. ),
  234. TP_printk("apic_%s %s = 0x%llx",
  235. __entry->rw ? "write" : "read",
  236. __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
  237. __entry->val)
  238. );
  239. #define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
  240. #define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)
  241. #define KVM_ISA_VMX 1
  242. #define KVM_ISA_SVM 2
  243. #define kvm_print_exit_reason(exit_reason, isa) \
  244. (isa == KVM_ISA_VMX) ? \
  245. __print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) : \
  246. __print_symbolic(exit_reason, SVM_EXIT_REASONS), \
  247. (isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "", \
  248. (isa == KVM_ISA_VMX) ? \
  249. __print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""
  250. #define TRACE_EVENT_KVM_EXIT(name) \
  251. TRACE_EVENT(name, \
  252. TP_PROTO(struct kvm_vcpu *vcpu, u32 isa), \
  253. TP_ARGS(vcpu, isa), \
  254. \
  255. TP_STRUCT__entry( \
  256. __field( unsigned int, exit_reason ) \
  257. __field( unsigned long, guest_rip ) \
  258. __field( u32, isa ) \
  259. __field( u64, info1 ) \
  260. __field( u64, info2 ) \
  261. __field( u32, intr_info ) \
  262. __field( u32, error_code ) \
  263. __field( unsigned int, vcpu_id ) \
  264. ), \
  265. \
  266. TP_fast_assign( \
  267. __entry->guest_rip = kvm_rip_read(vcpu); \
  268. __entry->isa = isa; \
  269. __entry->vcpu_id = vcpu->vcpu_id; \
  270. kvm_x86_call(get_exit_info)(vcpu, \
  271. &__entry->exit_reason, \
  272. &__entry->info1, \
  273. &__entry->info2, \
  274. &__entry->intr_info, \
  275. &__entry->error_code); \
  276. ), \
  277. \
  278. TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \
  279. "info2 0x%016llx intr_info 0x%08x error_code 0x%08x", \
  280. __entry->vcpu_id, \
  281. kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
  282. __entry->guest_rip, __entry->info1, __entry->info2, \
  283. __entry->intr_info, __entry->error_code) \
  284. )
  285. /*
  286. * Tracepoint for kvm guest exit:
  287. */
  288. TRACE_EVENT_KVM_EXIT(kvm_exit);
  289. /*
  290. * Tracepoint for kvm interrupt injection:
  291. */
  292. TRACE_EVENT(kvm_inj_virq,
  293. TP_PROTO(unsigned int vector, bool soft, bool reinjected),
  294. TP_ARGS(vector, soft, reinjected),
  295. TP_STRUCT__entry(
  296. __field( unsigned int, vector )
  297. __field( bool, soft )
  298. __field( bool, reinjected )
  299. ),
  300. TP_fast_assign(
  301. __entry->vector = vector;
  302. __entry->soft = soft;
  303. __entry->reinjected = reinjected;
  304. ),
  305. TP_printk("%s 0x%x%s",
  306. __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector,
  307. __entry->reinjected ? " [reinjected]" : "")
  308. );
  309. #define EXS(x) { x##_VECTOR, "#" #x }
  310. #define kvm_trace_sym_exc \
  311. EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
  312. EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
  313. EXS(MF), EXS(AC), EXS(MC)
  314. /*
  315. * Tracepoint for kvm interrupt injection:
  316. */
  317. TRACE_EVENT(kvm_inj_exception,
  318. TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
  319. bool reinjected),
  320. TP_ARGS(exception, has_error, error_code, reinjected),
  321. TP_STRUCT__entry(
  322. __field( u8, exception )
  323. __field( u8, has_error )
  324. __field( u32, error_code )
  325. __field( bool, reinjected )
  326. ),
  327. TP_fast_assign(
  328. __entry->exception = exception;
  329. __entry->has_error = has_error;
  330. __entry->error_code = error_code;
  331. __entry->reinjected = reinjected;
  332. ),
  333. TP_printk("%s%s%s%s%s",
  334. __print_symbolic(__entry->exception, kvm_trace_sym_exc),
  335. !__entry->has_error ? "" : " (",
  336. !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }),
  337. !__entry->has_error ? "" : ")",
  338. __entry->reinjected ? " [reinjected]" : "")
  339. );
  340. /*
  341. * Tracepoint for page fault.
  342. */
  343. TRACE_EVENT(kvm_page_fault,
  344. TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code),
  345. TP_ARGS(vcpu, fault_address, error_code),
  346. TP_STRUCT__entry(
  347. __field( unsigned int, vcpu_id )
  348. __field( unsigned long, guest_rip )
  349. __field( u64, fault_address )
  350. __field( u64, error_code )
  351. ),
  352. TP_fast_assign(
  353. __entry->vcpu_id = vcpu->vcpu_id;
  354. __entry->guest_rip = kvm_rip_read(vcpu);
  355. __entry->fault_address = fault_address;
  356. __entry->error_code = error_code;
  357. ),
  358. TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx",
  359. __entry->vcpu_id, __entry->guest_rip,
  360. __entry->fault_address, __entry->error_code)
  361. );
  362. /*
  363. * Tracepoint for guest MSR access.
  364. */
  365. TRACE_EVENT(kvm_msr,
  366. TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
  367. TP_ARGS(write, ecx, data, exception),
  368. TP_STRUCT__entry(
  369. __field( unsigned, write )
  370. __field( u32, ecx )
  371. __field( u64, data )
  372. __field( u8, exception )
  373. ),
  374. TP_fast_assign(
  375. __entry->write = write;
  376. __entry->ecx = ecx;
  377. __entry->data = data;
  378. __entry->exception = exception;
  379. ),
  380. TP_printk("msr_%s %x = 0x%llx%s",
  381. __entry->write ? "write" : "read",
  382. __entry->ecx, __entry->data,
  383. __entry->exception ? " (#GP)" : "")
  384. );
  385. #define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false)
  386. #define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false)
  387. #define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true)
  388. #define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true)
  389. /*
  390. * Tracepoint for guest CR access.
  391. */
  392. TRACE_EVENT(kvm_cr,
  393. TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
  394. TP_ARGS(rw, cr, val),
  395. TP_STRUCT__entry(
  396. __field( unsigned int, rw )
  397. __field( unsigned int, cr )
  398. __field( unsigned long, val )
  399. ),
  400. TP_fast_assign(
  401. __entry->rw = rw;
  402. __entry->cr = cr;
  403. __entry->val = val;
  404. ),
  405. TP_printk("cr_%s %x = 0x%lx",
  406. __entry->rw ? "write" : "read",
  407. __entry->cr, __entry->val)
  408. );
  409. #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
  410. #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)
  411. TRACE_EVENT(kvm_pic_set_irq,
  412. TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
  413. TP_ARGS(chip, pin, elcr, imr, coalesced),
  414. TP_STRUCT__entry(
  415. __field( __u8, chip )
  416. __field( __u8, pin )
  417. __field( __u8, elcr )
  418. __field( __u8, imr )
  419. __field( bool, coalesced )
  420. ),
  421. TP_fast_assign(
  422. __entry->chip = chip;
  423. __entry->pin = pin;
  424. __entry->elcr = elcr;
  425. __entry->imr = imr;
  426. __entry->coalesced = coalesced;
  427. ),
  428. TP_printk("chip %u pin %u (%s%s)%s",
  429. __entry->chip, __entry->pin,
  430. (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
  431. (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
  432. __entry->coalesced ? " (coalesced)" : "")
  433. );
  434. #define kvm_apic_dst_shorthand \
  435. {0x0, "dst"}, \
  436. {0x1, "self"}, \
  437. {0x2, "all"}, \
  438. {0x3, "all-but-self"}
  439. TRACE_EVENT(kvm_apic_ipi,
  440. TP_PROTO(__u32 icr_low, __u32 dest_id),
  441. TP_ARGS(icr_low, dest_id),
  442. TP_STRUCT__entry(
  443. __field( __u32, icr_low )
  444. __field( __u32, dest_id )
  445. ),
  446. TP_fast_assign(
  447. __entry->icr_low = icr_low;
  448. __entry->dest_id = dest_id;
  449. ),
  450. TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
  451. __entry->dest_id, (u8)__entry->icr_low,
  452. __print_symbolic((__entry->icr_low >> 8 & 0x7),
  453. kvm_deliver_mode),
  454. (__entry->icr_low & (1<<11)) ? "logical" : "physical",
  455. (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
  456. (__entry->icr_low & (1<<15)) ? "level" : "edge",
  457. __print_symbolic((__entry->icr_low >> 18 & 0x3),
  458. kvm_apic_dst_shorthand))
  459. );
  460. TRACE_EVENT(kvm_apic_accept_irq,
  461. TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
  462. TP_ARGS(apicid, dm, tm, vec),
  463. TP_STRUCT__entry(
  464. __field( __u32, apicid )
  465. __field( __u16, dm )
  466. __field( __u16, tm )
  467. __field( __u8, vec )
  468. ),
  469. TP_fast_assign(
  470. __entry->apicid = apicid;
  471. __entry->dm = dm;
  472. __entry->tm = tm;
  473. __entry->vec = vec;
  474. ),
  475. TP_printk("apicid %x vec %u (%s|%s)",
  476. __entry->apicid, __entry->vec,
  477. __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
  478. __entry->tm ? "level" : "edge")
  479. );
  480. TRACE_EVENT(kvm_eoi,
  481. TP_PROTO(struct kvm_lapic *apic, int vector),
  482. TP_ARGS(apic, vector),
  483. TP_STRUCT__entry(
  484. __field( __u32, apicid )
  485. __field( int, vector )
  486. ),
  487. TP_fast_assign(
  488. __entry->apicid = apic->vcpu->vcpu_id;
  489. __entry->vector = vector;
  490. ),
  491. TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
  492. );
  493. TRACE_EVENT(kvm_pv_eoi,
  494. TP_PROTO(struct kvm_lapic *apic, int vector),
  495. TP_ARGS(apic, vector),
  496. TP_STRUCT__entry(
  497. __field( __u32, apicid )
  498. __field( int, vector )
  499. ),
  500. TP_fast_assign(
  501. __entry->apicid = apic->vcpu->vcpu_id;
  502. __entry->vector = vector;
  503. ),
  504. TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
  505. );
  506. /*
  507. * Tracepoint for nested VMRUN
  508. */
  509. TRACE_EVENT(kvm_nested_vmenter,
  510. TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
  511. __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd,
  512. __u64 guest_cr3, __u32 isa),
  513. TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
  514. guest_tdp_pgd, guest_cr3, isa),
  515. TP_STRUCT__entry(
  516. __field( __u64, rip )
  517. __field( __u64, vmcb )
  518. __field( __u64, nested_rip )
  519. __field( __u32, int_ctl )
  520. __field( __u32, event_inj )
  521. __field( bool, tdp_enabled )
  522. __field( __u64, guest_pgd )
  523. __field( __u32, isa )
  524. ),
  525. TP_fast_assign(
  526. __entry->rip = rip;
  527. __entry->vmcb = vmcb;
  528. __entry->nested_rip = nested_rip;
  529. __entry->int_ctl = int_ctl;
  530. __entry->event_inj = event_inj;
  531. __entry->tdp_enabled = tdp_enabled;
  532. __entry->guest_pgd = tdp_enabled ? guest_tdp_pgd : guest_cr3;
  533. __entry->isa = isa;
  534. ),
  535. TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
  536. "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx",
  537. __entry->rip,
  538. __entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
  539. __entry->vmcb,
  540. __entry->nested_rip,
  541. __entry->int_ctl,
  542. __entry->event_inj,
  543. __entry->isa == KVM_ISA_VMX ? "ept" : "npt",
  544. __entry->tdp_enabled ? "y" : "n",
  545. !__entry->tdp_enabled ? "guest_cr3" :
  546. __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3",
  547. __entry->guest_pgd)
  548. );
  549. TRACE_EVENT(kvm_nested_intercepts,
  550. TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
  551. __u32 intercept1, __u32 intercept2, __u32 intercept3),
  552. TP_ARGS(cr_read, cr_write, exceptions, intercept1,
  553. intercept2, intercept3),
  554. TP_STRUCT__entry(
  555. __field( __u16, cr_read )
  556. __field( __u16, cr_write )
  557. __field( __u32, exceptions )
  558. __field( __u32, intercept1 )
  559. __field( __u32, intercept2 )
  560. __field( __u32, intercept3 )
  561. ),
  562. TP_fast_assign(
  563. __entry->cr_read = cr_read;
  564. __entry->cr_write = cr_write;
  565. __entry->exceptions = exceptions;
  566. __entry->intercept1 = intercept1;
  567. __entry->intercept2 = intercept2;
  568. __entry->intercept3 = intercept3;
  569. ),
  570. TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
  571. "intercepts: %08x %08x %08x",
  572. __entry->cr_read, __entry->cr_write, __entry->exceptions,
  573. __entry->intercept1, __entry->intercept2, __entry->intercept3)
  574. );
  575. /*
  576. * Tracepoint for #VMEXIT while nested
  577. */
  578. TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);
  579. /*
  580. * Tracepoint for #VMEXIT reinjected to the guest
  581. */
  582. TRACE_EVENT(kvm_nested_vmexit_inject,
  583. TP_PROTO(__u32 exit_code,
  584. __u64 exit_info1, __u64 exit_info2,
  585. __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
  586. TP_ARGS(exit_code, exit_info1, exit_info2,
  587. exit_int_info, exit_int_info_err, isa),
  588. TP_STRUCT__entry(
  589. __field( __u32, exit_code )
  590. __field( __u64, exit_info1 )
  591. __field( __u64, exit_info2 )
  592. __field( __u32, exit_int_info )
  593. __field( __u32, exit_int_info_err )
  594. __field( __u32, isa )
  595. ),
  596. TP_fast_assign(
  597. __entry->exit_code = exit_code;
  598. __entry->exit_info1 = exit_info1;
  599. __entry->exit_info2 = exit_info2;
  600. __entry->exit_int_info = exit_int_info;
  601. __entry->exit_int_info_err = exit_int_info_err;
  602. __entry->isa = isa;
  603. ),
  604. TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
  605. "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
  606. kvm_print_exit_reason(__entry->exit_code, __entry->isa),
  607. __entry->exit_info1, __entry->exit_info2,
  608. __entry->exit_int_info, __entry->exit_int_info_err)
  609. );
  610. /*
  611. * Tracepoint for nested #vmexit because of interrupt pending
  612. */
  613. TRACE_EVENT(kvm_nested_intr_vmexit,
  614. TP_PROTO(__u64 rip),
  615. TP_ARGS(rip),
  616. TP_STRUCT__entry(
  617. __field( __u64, rip )
  618. ),
  619. TP_fast_assign(
  620. __entry->rip = rip
  621. ),
  622. TP_printk("rip: 0x%016llx", __entry->rip)
  623. );
  624. /*
  625. * Tracepoint for nested #vmexit because of interrupt pending
  626. */
  627. TRACE_EVENT(kvm_invlpga,
  628. TP_PROTO(__u64 rip, unsigned int asid, u64 address),
  629. TP_ARGS(rip, asid, address),
  630. TP_STRUCT__entry(
  631. __field( __u64, rip )
  632. __field( unsigned int, asid )
  633. __field( __u64, address )
  634. ),
  635. TP_fast_assign(
  636. __entry->rip = rip;
  637. __entry->asid = asid;
  638. __entry->address = address;
  639. ),
  640. TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
  641. __entry->rip, __entry->asid, __entry->address)
  642. );
  643. /*
  644. * Tracepoint for nested #vmexit because of interrupt pending
  645. */
  646. TRACE_EVENT(kvm_skinit,
  647. TP_PROTO(__u64 rip, __u32 slb),
  648. TP_ARGS(rip, slb),
  649. TP_STRUCT__entry(
  650. __field( __u64, rip )
  651. __field( __u32, slb )
  652. ),
  653. TP_fast_assign(
  654. __entry->rip = rip;
  655. __entry->slb = slb;
  656. ),
  657. TP_printk("rip: 0x%016llx slb: 0x%08x",
  658. __entry->rip, __entry->slb)
  659. );
  660. #define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
  661. #define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
  662. #define KVM_EMUL_INSN_F_CS_D (1 << 2)
  663. #define KVM_EMUL_INSN_F_CS_L (1 << 3)
  664. #define kvm_trace_symbol_emul_flags \
  665. { 0, "real" }, \
  666. { KVM_EMUL_INSN_F_CR0_PE \
  667. | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \
  668. { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \
  669. { KVM_EMUL_INSN_F_CR0_PE \
  670. | KVM_EMUL_INSN_F_CS_D, "prot32" }, \
  671. { KVM_EMUL_INSN_F_CR0_PE \
  672. | KVM_EMUL_INSN_F_CS_L, "prot64" }
  673. #define kei_decode_mode(mode) ({ \
  674. u8 flags = 0xff; \
  675. switch (mode) { \
  676. case X86EMUL_MODE_REAL: \
  677. flags = 0; \
  678. break; \
  679. case X86EMUL_MODE_VM86: \
  680. flags = KVM_EMUL_INSN_F_EFL_VM; \
  681. break; \
  682. case X86EMUL_MODE_PROT16: \
  683. flags = KVM_EMUL_INSN_F_CR0_PE; \
  684. break; \
  685. case X86EMUL_MODE_PROT32: \
  686. flags = KVM_EMUL_INSN_F_CR0_PE \
  687. | KVM_EMUL_INSN_F_CS_D; \
  688. break; \
  689. case X86EMUL_MODE_PROT64: \
  690. flags = KVM_EMUL_INSN_F_CR0_PE \
  691. | KVM_EMUL_INSN_F_CS_L; \
  692. break; \
  693. } \
  694. flags; \
  695. })
  696. TRACE_EVENT(kvm_emulate_insn,
  697. TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
  698. TP_ARGS(vcpu, failed),
  699. TP_STRUCT__entry(
  700. __field( __u64, rip )
  701. __field( __u32, csbase )
  702. __field( __u8, len )
  703. __array( __u8, insn, 15 )
  704. __field( __u8, flags )
  705. __field( __u8, failed )
  706. ),
  707. TP_fast_assign(
  708. __entry->csbase = kvm_x86_call(get_segment_base)(vcpu,
  709. VCPU_SREG_CS);
  710. __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
  711. - vcpu->arch.emulate_ctxt->fetch.data;
  712. __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
  713. memcpy(__entry->insn,
  714. vcpu->arch.emulate_ctxt->fetch.data,
  715. 15);
  716. __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
  717. __entry->failed = failed;
  718. ),
  719. TP_printk("%x:%llx:%s (%s)%s",
  720. __entry->csbase, __entry->rip,
  721. __print_hex(__entry->insn, __entry->len),
  722. __print_symbolic(__entry->flags,
  723. kvm_trace_symbol_emul_flags),
  724. __entry->failed ? " failed" : ""
  725. )
  726. );
  727. #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
  728. #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
  729. TRACE_EVENT(
  730. vcpu_match_mmio,
  731. TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
  732. TP_ARGS(gva, gpa, write, gpa_match),
  733. TP_STRUCT__entry(
  734. __field(gva_t, gva)
  735. __field(gpa_t, gpa)
  736. __field(bool, write)
  737. __field(bool, gpa_match)
  738. ),
  739. TP_fast_assign(
  740. __entry->gva = gva;
  741. __entry->gpa = gpa;
  742. __entry->write = write;
  743. __entry->gpa_match = gpa_match
  744. ),
  745. TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
  746. __entry->write ? "Write" : "Read",
  747. __entry->gpa_match ? "GPA" : "GVA")
  748. );
/*
 * Tracepoint for writes to the vCPU's TSC offset: logs the old and the new
 * offset so host-side tools can reconstruct guest TSC adjustments.
 */
TRACE_EVENT(kvm_write_tsc_offset,
	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
		 __u64 next_tsc_offset),
	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
	TP_STRUCT__entry(
		__field( unsigned int,	vcpu_id				)
		__field(	__u64,	previous_tsc_offset		)
		__field(	__u64,	next_tsc_offset			)
	),
	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->previous_tsc_offset	= previous_tsc_offset;
		__entry->next_tsc_offset	= next_tsc_offset;
	),
	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
);
  766. #ifdef CONFIG_X86_64
  767. #define host_clocks \
  768. {VDSO_CLOCKMODE_NONE, "none"}, \
  769. {VDSO_CLOCKMODE_TSC, "tsc"} \
/*
 * Tracepoint for masterclock updates: records whether the masterclock is in
 * use, which host clocksource backs it, and whether all vCPU TSC offsets
 * matched at update time.
 */
TRACE_EVENT(kvm_update_master_clock,
	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
	TP_ARGS(use_master_clock, host_clock, offset_matched),
	TP_STRUCT__entry(
		__field(		bool,	use_master_clock	)
		__field(	unsigned int,	host_clock		)
		__field(		bool,	offset_matched		)
	),
	TP_fast_assign(
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
		__entry->offset_matched		= offset_matched;
	),
	TP_printk("masterclock %d hostclock %s offsetmatched %u",
		  __entry->use_master_clock,
		  __print_symbolic(__entry->host_clock, host_clocks),
		  __entry->offset_matched)
);
/*
 * Tracepoint for TSC matching: reports how many of the online vCPUs have
 * matched TSC offsets and whether the masterclock is consequently usable.
 */
TRACE_EVENT(kvm_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		 unsigned int online_vcpus, bool use_master_clock,
		 unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),
	TP_STRUCT__entry(
		__field( unsigned int,	vcpu_id			)
		__field( unsigned int,	nr_vcpus_matched_tsc	)
		__field( unsigned int,	online_vcpus		)
		__field(	bool,	use_master_clock	)
		__field( unsigned int,	host_clock		)
	),
	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->nr_vcpus_matched_tsc	= nr_matched;
		__entry->online_vcpus		= online_vcpus;
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
	),
	/* Argument order intentionally interleaves: masterclock, then the
	 * matched/online counts, then the symbolic host clocksource. */
	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
		  " hostclock %s",
		  __entry->vcpu_id, __entry->use_master_clock,
		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
		  __print_symbolic(__entry->host_clock, host_clocks))
);
  814. #endif /* CONFIG_X86_64 */
/*
 * Tracepoint for PML full VMEXIT: fired when the Page Modification Log
 * buffer fills and the vCPU exits so the host can drain it.
 */
TRACE_EVENT(kvm_pml_full,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),
	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
	),
	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
	),
	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
);
/*
 * Tracepoint for Pause-Loop-Exiting window updates: logs the old and new
 * PLE window values and whether the window grew or shrank.
 */
TRACE_EVENT(kvm_ple_window_update,
	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
	TP_ARGS(vcpu_id, new, old),
	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned int,	new		)
		__field(	unsigned int,	old		)
	),
	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->new		= new;
		__entry->old		= old;
	),
	/* "growed"/"shrinked" are long-standing output strings; kept verbatim
	 * since trace output may be parsed by external tools. */
	TP_printk("vcpu %u old %u new %u (%s)",
		  __entry->vcpu_id, __entry->old, __entry->new,
		  __entry->old < __entry->new ? "growed" : "shrinked")
);
/*
 * Tracepoint for pvclock updates: snapshots the fields of the guest's
 * pvclock_vcpu_time_info structure as written by the host.
 */
TRACE_EVENT(kvm_pvclock_update,
	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
	TP_ARGS(vcpu_id, pvclock),
	TP_STRUCT__entry(
		__field( unsigned int,	vcpu_id			)
		__field(	__u32,	version			)
		__field(	__u64,	tsc_timestamp		)
		__field(	__u64,	system_time		)
		__field(	__u32,	tsc_to_system_mul	)
		__field(	__s8,	tsc_shift		)
		__field(	__u8,	flags			)
	),
	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->version	   = pvclock->version;
		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
		__entry->system_time	   = pvclock->system_time;
		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
		__entry->tsc_shift	   = pvclock->tsc_shift;
		__entry->flags		   = pvclock->flags;
	),
	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
		  "flags 0x%x }",
		  __entry->vcpu_id,
		  __entry->version,
		  __entry->tsc_timestamp,
		  __entry->system_time,
		  __entry->tsc_to_system_mul,
		  __entry->tsc_shift,
		  __entry->flags)
);
/*
 * Tracepoint for local APIC timer expiry wait: 'delta' is the signed
 * distance from the programmed deadline; negative means the wait ended
 * early, positive means it ended late.
 */
TRACE_EVENT(kvm_wait_lapic_expire,
	TP_PROTO(unsigned int vcpu_id, s64 delta),
	TP_ARGS(vcpu_id, delta),
	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	s64,		delta		)
	),
	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->delta             = delta;
	),
	TP_printk("vcpu %u: delta %lld (%s)",
		  __entry->vcpu_id,
		  __entry->delta,
		  __entry->delta < 0 ? "early" : "late")
);
/*
 * Tracepoint for SMM transitions: logs whether the vCPU is entering or
 * leaving System Management Mode and the SMBASE in effect.
 */
TRACE_EVENT(kvm_smm_transition,
	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
	TP_ARGS(vcpu_id, smbase, entering),
	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field(         u64,  smbase  )
		__field(        bool,  entering)
	),
	TP_fast_assign(
		__entry->vcpu_id  = vcpu_id;
		__entry->smbase   = smbase;
		__entry->entering = entering;
	),
	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
		  __entry->vcpu_id,
		  __entry->entering ? "entering" : "leaving",
		  __entry->smbase)
);
/*
 * Tracepoint for VT-d posted-interrupts and AMD-Vi Guest Virtual APIC:
 * records an IRTE update routing a host IRQ to a vCPU's posted-interrupt
 * descriptor, or the teardown of such a route (set == false).
 */
TRACE_EVENT(kvm_pi_irte_update,
	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
		 unsigned int gsi, unsigned int gvec,
		 u64 pi_desc_addr, bool set),
	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
	TP_STRUCT__entry(
		__field(	unsigned int,	host_irq	)
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned int,	gsi		)
		__field(	unsigned int,	gvec		)
		__field(	u64,		pi_desc_addr	)
		__field(	bool,		set		)
	),
	TP_fast_assign(
		__entry->host_irq	= host_irq;
		__entry->vcpu_id	= vcpu_id;
		__entry->gsi		= gsi;
		__entry->gvec		= gvec;
		__entry->pi_desc_addr	= pi_desc_addr;
		__entry->set		= set;
	),
	TP_printk("PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
		  __entry->set ? "enabled and being updated" : "disabled",
		  __entry->host_irq,
		  __entry->vcpu_id,
		  __entry->gsi,
		  __entry->gvec,
		  __entry->pi_desc_addr)
);
/*
 * Tracepoint for kvm_hv_notify_acked_sint: a Hyper-V synthetic interrupt
 * source (SINT) acknowledgement was delivered for the given vCPU.
 */
TRACE_EVENT(kvm_hv_notify_acked_sint,
	TP_PROTO(int vcpu_id, u32 sint),
	TP_ARGS(vcpu_id, sint),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
	),
	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
);
/*
 * Tracepoint for synic_set_irq: injection of a Hyper-V SynIC interrupt on
 * the given SINT; 'ret' is the delivery result returned to the caller.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),
	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);
  983. /*
  984. * Tracepoint for kvm_hv_synic_send_eoi.
  985. */
  986. TRACE_EVENT(kvm_hv_synic_send_eoi,
  987. TP_PROTO(int vcpu_id, int vector),
  988. TP_ARGS(vcpu_id, vector),
  989. TP_STRUCT__entry(
  990. __field(int, vcpu_id)
  991. __field(u32, sint)
  992. __field(int, vector)
  993. __field(int, ret)
  994. ),
  995. TP_fast_assign(
  996. __entry->vcpu_id = vcpu_id;
  997. __entry->vector = vector;
  998. ),
  999. TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
  1000. );
  1001. /*
  1002. * Tracepoint for synic_set_msr.
  1003. */
  1004. TRACE_EVENT(kvm_hv_synic_set_msr,
  1005. TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
  1006. TP_ARGS(vcpu_id, msr, data, host),
  1007. TP_STRUCT__entry(
  1008. __field(int, vcpu_id)
  1009. __field(u32, msr)
  1010. __field(u64, data)
  1011. __field(bool, host)
  1012. ),
  1013. TP_fast_assign(
  1014. __entry->vcpu_id = vcpu_id;
  1015. __entry->msr = msr;
  1016. __entry->data = data;
  1017. __entry->host = host
  1018. ),
  1019. TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
  1020. __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
  1021. );
/*
 * Tracepoint for stimer_set_config: a Hyper-V synthetic timer's config
 * register was written; 'host' marks host-initiated writes.
 */
TRACE_EVENT(kvm_hv_stimer_set_config,
	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
	TP_ARGS(vcpu_id, timer_index, config, host),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, config)
		__field(bool, host)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->config = config;
		__entry->host = host;
	),
	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->config,
		  __entry->host)
);
/*
 * Tracepoint for stimer_set_count: a Hyper-V synthetic timer's count
 * register was written; 'host' marks host-initiated writes.
 */
TRACE_EVENT(kvm_hv_stimer_set_count,
	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
	TP_ARGS(vcpu_id, timer_index, count, host),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, count)
		__field(bool, host)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->count = count;
		__entry->host = host;
	),
	TP_printk("vcpu_id %d timer %d count %llu host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->count,
		  __entry->host)
);
/*
 * Tracepoint for stimer_start (periodic timer case): logs the current time
 * and the computed expiration time of the periodic synthetic timer.
 */
TRACE_EVENT(kvm_hv_stimer_start_periodic,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, exp_time)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->exp_time = exp_time;
	),
	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->exp_time)
);
/*
 * Tracepoint for stimer_start (one-shot timer case): logs the current time
 * and the absolute count at which the one-shot synthetic timer fires.
 */
TRACE_EVENT(kvm_hv_stimer_start_one_shot,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
	TP_ARGS(vcpu_id, timer_index, time_now, count),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, count)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->count = count;
	),
	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->count)
);
/*
 * Tracepoint for stimer_timer_callback: the hrtimer backing a Hyper-V
 * synthetic timer fired.
 */
TRACE_EVENT(kvm_hv_stimer_callback,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),
	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);
/*
 * Tracepoint for stimer_expiration: a synthetic timer expired; 'direct'
 * indicates direct-mode (interrupt) delivery vs. message-based delivery,
 * and 'msg_send_result' is the outcome of sending the expiry message.
 */
TRACE_EVENT(kvm_hv_stimer_expiration,
	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(int, direct)
		__field(int, msg_send_result)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->direct = direct;
		__entry->msg_send_result = msg_send_result;
	),
	TP_printk("vcpu_id %d timer %d direct %d send result %d",
		  __entry->vcpu_id, __entry->timer_index,
		  __entry->direct, __entry->msg_send_result)
);
/*
 * Tracepoint for stimer_cleanup: a Hyper-V synthetic timer was stopped and
 * its pending state cleared.
 */
TRACE_EVENT(kvm_hv_stimer_cleanup,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),
	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);
  1166. #define kvm_print_apicv_inhibit_reasons(inhibits) \
  1167. (inhibits), (inhibits) ? " " : "", \
  1168. (inhibits) ? __print_flags(inhibits, "|", APICV_INHIBIT_REASONS) : ""
/*
 * Tracepoint for APIC virtualization inhibit changes: records which reason
 * bit was set or cleared and the resulting aggregate inhibit mask.
 */
TRACE_EVENT(kvm_apicv_inhibit_changed,
	TP_PROTO(int reason, bool set, unsigned long inhibits),
	TP_ARGS(reason, set, inhibits),
	TP_STRUCT__entry(
		__field(int, reason)
		__field(bool, set)
		__field(unsigned long, inhibits)
	),
	TP_fast_assign(
		__entry->reason = reason;
		__entry->set = set;
		__entry->inhibits = inhibits;
	),
	TP_printk("%s reason=%u, inhibits=0x%lx%s%s",
		  __entry->set ? "set" : "cleared",
		  __entry->reason,
		  kvm_print_apicv_inhibit_reasons(__entry->inhibits))
);
/*
 * Tracepoint for an interrupt accepted via APIC virtualization: logs the
 * target APIC ID, vector, delivery mode and trigger mode.
 */
TRACE_EVENT(kvm_apicv_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
	    TP_ARGS(apicid, dm, tm, vec),
	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u16,		tm		)
		__field(	__u8,		vec		)
	),
	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
	),
	TP_printk("apicid %x vec %u (%s|%s)",
		  __entry->apicid, __entry->vec,
		  /* Delivery mode occupies bits 10:8 of the ICR image. */
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge")
);
/*
 * Tracepoint for AMD AVIC: an IPI could not be completed in hardware and
 * trapped to the hypervisor; logs the ICR halves, the AVIC backing-page id
 * and the fault index.
 */
TRACE_EVENT(kvm_avic_incomplete_ipi,
	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
	    TP_ARGS(vcpu, icrh, icrl, id, index),
	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, icrh)
		__field(u32, icrl)
		__field(u32, id)
		__field(u32, index)
	),
	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->icrh = icrh;
		__entry->icrl = icrl;
		__entry->id = id;
		__entry->index = index;
	),
	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
		  __entry->vcpu, __entry->icrh, __entry->icrl,
		  __entry->id, __entry->index)
);
/*
 * Tracepoint for an AVIC unaccelerated APIC register access: 'ft'
 * distinguishes trap from fault exits and 'rw' write from read; the offset
 * is decoded to the APIC register name.
 */
TRACE_EVENT(kvm_avic_unaccelerated_access,
	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
	    TP_ARGS(vcpu, offset, ft, rw, vec),
	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, offset)
		__field(bool, ft)
		__field(bool, rw)
		__field(u32, vec)
	),
	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->offset = offset;
		__entry->ft = ft;
		__entry->rw = rw;
		__entry->vec = vec;
	),
	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
		  __entry->vcpu,
		  __entry->offset,
		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
		  __entry->ft ? "trap" : "fault",
		  __entry->rw ? "write" : "read",
		  __entry->vec)
);
/*
 * Tracepoint for an AMD-Vi GA (guest APIC) log event targeting the given
 * VM and vCPU.
 */
TRACE_EVENT(kvm_avic_ga_log,
	    TP_PROTO(u32 vmid, u32 vcpuid),
	    TP_ARGS(vmid, vcpuid),
	TP_STRUCT__entry(
		__field(u32, vmid)
		__field(u32, vcpuid)
	),
	TP_fast_assign(
		__entry->vmid = vmid;
		__entry->vcpuid = vcpuid;
	),
	TP_printk("vmid=%u, vcpuid=%u",
		  __entry->vmid, __entry->vcpuid)
);
/*
 * Tracepoint for the AVIC slow path used to kick a target vCPU when the
 * IPI could not be delivered by hardware; logs the ICR halves and the
 * destination index.
 */
TRACE_EVENT(kvm_avic_kick_vcpu_slowpath,
	    TP_PROTO(u32 icrh, u32 icrl, u32 index),
	    TP_ARGS(icrh, icrl, index),
	TP_STRUCT__entry(
		__field(u32, icrh)
		__field(u32, icrl)
		__field(u32, index)
	),
	TP_fast_assign(
		__entry->icrh = icrh;
		__entry->icrl = icrl;
		__entry->index = index;
	),
	TP_printk("icrh:icrl=%#08x:%08x, index=%u",
		  __entry->icrh, __entry->icrl, __entry->index)
);
/*
 * Tracepoint for an AVIC doorbell sent to a running vCPU on the physical
 * CPU identified by 'apicid'.
 */
TRACE_EVENT(kvm_avic_doorbell,
	    TP_PROTO(u32 vcpuid, u32 apicid),
	    TP_ARGS(vcpuid, apicid),
	TP_STRUCT__entry(
		__field(u32, vcpuid)
		__field(u32, apicid)
	),
	TP_fast_assign(
		__entry->vcpuid = vcpuid;
		__entry->apicid = apicid;
	),
	TP_printk("vcpuid=%u, apicid=%u",
		  __entry->vcpuid, __entry->apicid)
);
/*
 * Tracepoint for switching between the hypervisor (preemption) timer and
 * the software-emulated LAPIC timer for a vCPU.
 */
TRACE_EVENT(kvm_hv_timer_state,
		TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
		TP_ARGS(vcpu_id, hv_timer_in_use),
		TP_STRUCT__entry(
			__field(unsigned int, vcpu_id)
			__field(unsigned int, hv_timer_in_use)
			),
		TP_fast_assign(
			__entry->vcpu_id = vcpu_id;
			__entry->hv_timer_in_use = hv_timer_in_use;
			),
		TP_printk("vcpu_id %x hv_timer %x",
			  __entry->vcpu_id,
			  __entry->hv_timer_in_use)
);
/*
 * Tracepoint for kvm_hv_flush_tlb: a Hyper-V TLB-flush hypercall; logs the
 * target processor mask, address space, flags, and whether the request came
 * from a nested (L2) guest.
 */
TRACE_EVENT(kvm_hv_flush_tlb,
	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool guest_mode),
	TP_ARGS(processor_mask, address_space, flags, guest_mode),
	TP_STRUCT__entry(
		__field(u64, processor_mask)
		__field(u64, address_space)
		__field(u64, flags)
		__field(bool, guest_mode)
	),
	TP_fast_assign(
		__entry->processor_mask = processor_mask;
		__entry->address_space = address_space;
		__entry->flags = flags;
		__entry->guest_mode = guest_mode;
	),
	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s",
		  __entry->processor_mask, __entry->address_space,
		  __entry->flags, __entry->guest_mode ? "(L2)" : "")
);
/*
 * Tracepoint for kvm_hv_flush_tlb_ex: the extended Hyper-V TLB-flush
 * hypercall, which addresses vCPUs via a bank mask/format pair instead of
 * a flat processor mask.
 */
TRACE_EVENT(kvm_hv_flush_tlb_ex,
	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool guest_mode),
	TP_ARGS(valid_bank_mask, format, address_space, flags, guest_mode),
	TP_STRUCT__entry(
		__field(u64, valid_bank_mask)
		__field(u64, format)
		__field(u64, address_space)
		__field(u64, flags)
		__field(bool, guest_mode)
	),
	TP_fast_assign(
		__entry->valid_bank_mask = valid_bank_mask;
		__entry->format = format;
		__entry->address_space = address_space;
		__entry->flags = flags;
		__entry->guest_mode = guest_mode;
	),
	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
		  "address_space 0x%llx flags 0x%llx %s",
		  __entry->valid_bank_mask, __entry->format,
		  __entry->address_space, __entry->flags,
		  __entry->guest_mode ? "(L2)" : "")
);
/*
 * Tracepoints for kvm_hv_send_ipi: the Hyper-V send-IPI hypercall with a
 * flat 64-bit processor mask.
 */
TRACE_EVENT(kvm_hv_send_ipi,
	TP_PROTO(u32 vector, u64 processor_mask),
	TP_ARGS(vector, processor_mask),
	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, processor_mask)
	),
	TP_fast_assign(
		__entry->vector = vector;
		__entry->processor_mask = processor_mask;
	),
	TP_printk("vector %x processor_mask 0x%llx",
		  __entry->vector, __entry->processor_mask)
);
/*
 * Tracepoint for the extended Hyper-V send-IPI hypercall, which addresses
 * vCPUs via a bank mask/format pair.
 */
TRACE_EVENT(kvm_hv_send_ipi_ex,
	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
	TP_ARGS(vector, format, valid_bank_mask),
	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, format)
		__field(u64, valid_bank_mask)
	),
	TP_fast_assign(
		__entry->vector = vector;
		__entry->format = format;
		__entry->valid_bank_mask = valid_bank_mask;
	),
	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
		  __entry->vector, __entry->format,
		  __entry->valid_bank_mask)
);
/*
 * Tracepoint for paravirtual TLB flush requests: whether the vCPU actually
 * needed a TLB flush when it was scheduled back in.
 */
TRACE_EVENT(kvm_pv_tlb_flush,
	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
	TP_ARGS(vcpu_id, need_flush_tlb),
	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	bool,	need_flush_tlb		)
	),
	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->need_flush_tlb = need_flush_tlb;
	),
	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
		__entry->need_flush_tlb ? "true" : "false")
);
/*
 * Tracepoint for failed nested VMX VM-Enter: 'msg' describes the consistency
 * check that failed; a non-zero 'err' is decoded to the VMX instruction
 * error name and appended.
 */
TRACE_EVENT(kvm_nested_vmenter_failed,
	TP_PROTO(const char *msg, u32 err),
	TP_ARGS(msg, err),
	TP_STRUCT__entry(
		__string(msg, msg)
		__field(u32, err)
	),
	TP_fast_assign(
		__assign_str(msg);
		__entry->err = err;
	),
	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
		__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);
/*
 * Tracepoint for syndbg_set_msr: a write to a Hyper-V synthetic debugger
 * MSR by the vCPU with the given VP index.
 */
TRACE_EVENT(kvm_hv_syndbg_set_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)
		__field(u32, msr)
		__field(u64, data)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),
	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);
/*
 * Tracepoint for syndbg_get_msr: a read of a Hyper-V synthetic debugger
 * MSR; 'data' is the value returned to the guest.
 */
TRACE_EVENT(kvm_hv_syndbg_get_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)
		__field(u32, msr)
		__field(u64, data)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),
	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);
/*
 * Tracepoint for the start of VMGEXIT processing: snapshots the SW exit
 * code and exit-info fields from the guest's GHCB.
 */
TRACE_EVENT(kvm_vmgexit_enter,
	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
	TP_ARGS(vcpu_id, ghcb),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, exit_reason)
		__field(u64, info1)
		__field(u64, info2)
	),
	TP_fast_assign(
		__entry->vcpu_id     = vcpu_id;
		__entry->exit_reason = ghcb->save.sw_exit_code;
		__entry->info1       = ghcb->save.sw_exit_info_1;
		__entry->info2       = ghcb->save.sw_exit_info_2;
	),
	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
		  __entry->vcpu_id, __entry->exit_reason,
		  __entry->info1, __entry->info2)
);
/*
 * Tracepoint for the end of VMGEXIT processing: snapshots the SW exit code
 * and exit-info fields from the GHCB after the exit was handled.
 */
TRACE_EVENT(kvm_vmgexit_exit,
	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
	TP_ARGS(vcpu_id, ghcb),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, exit_reason)
		__field(u64, info1)
		__field(u64, info2)
	),
	TP_fast_assign(
		__entry->vcpu_id     = vcpu_id;
		__entry->exit_reason = ghcb->save.sw_exit_code;
		__entry->info1       = ghcb->save.sw_exit_info_1;
		__entry->info2       = ghcb->save.sw_exit_info_2;
	),
	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
		  __entry->vcpu_id, __entry->exit_reason,
		  __entry->info1, __entry->info2)
);
  1516. /*
 * Tracepoint for the start of VMGEXIT MSR protocol processing
  1518. */
/* Logs the GHCB GPA presented via the MSR protocol at VMGEXIT entry. */
TRACE_EVENT(kvm_vmgexit_msr_protocol_enter,
	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa),
	TP_ARGS(vcpu_id, ghcb_gpa),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, ghcb_gpa)
	),
	TP_fast_assign(
		__entry->vcpu_id  = vcpu_id;
		__entry->ghcb_gpa = ghcb_gpa;
	),
	TP_printk("vcpu %u, ghcb_gpa %016llx",
		  __entry->vcpu_id, __entry->ghcb_gpa)
);
  1533. /*
 * Tracepoint for the end of VMGEXIT MSR protocol processing
  1535. */
/* Logs the GHCB GPA and the MSR-protocol result returned to the guest. */
TRACE_EVENT(kvm_vmgexit_msr_protocol_exit,
	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result),
	TP_ARGS(vcpu_id, ghcb_gpa, result),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, ghcb_gpa)
		__field(int, result)
	),
	TP_fast_assign(
		__entry->vcpu_id  = vcpu_id;
		__entry->ghcb_gpa = ghcb_gpa;
		__entry->result   = result;
	),
	TP_printk("vcpu %u, ghcb_gpa %016llx, result %d",
		  __entry->vcpu_id, __entry->ghcb_gpa, __entry->result)
);
/*
 * Tracepoint for #NPFs due to RMP faults: logs the faulting GPA/PFN, the
 * NPF error code, the RMP entry's page level, and the return value of the
 * PSMASH attempt used to split the RMP entry.
 */
TRACE_EVENT(kvm_rmp_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, u64 gpa, u64 pfn, u64 error_code,
		 int rmp_level, int psmash_ret),
	TP_ARGS(vcpu, gpa, pfn, error_code, rmp_level, psmash_ret),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, gpa)
		__field(u64, pfn)
		__field(u64, error_code)
		__field(int, rmp_level)
		__field(int, psmash_ret)
	),
	TP_fast_assign(
		__entry->vcpu_id	= vcpu->vcpu_id;
		__entry->gpa		= gpa;
		__entry->pfn		= pfn;
		__entry->error_code	= error_code;
		__entry->rmp_level	= rmp_level;
		__entry->psmash_ret	= psmash_ret;
	),
	TP_printk("vcpu %u gpa %016llx pfn 0x%llx error_code 0x%llx rmp_level %d psmash_ret %d",
		  __entry->vcpu_id, __entry->gpa, __entry->pfn,
		  __entry->error_code, __entry->rmp_level, __entry->psmash_ret)
);
  1579. #endif /* _TRACE_KVM_H */
  1580. #undef TRACE_INCLUDE_PATH
  1581. #define TRACE_INCLUDE_PATH ../../arch/x86/kvm
  1582. #undef TRACE_INCLUDE_FILE
  1583. #define TRACE_INCLUDE_FILE trace
  1584. /* This part must be outside protection */
  1585. #include <trace/define_trace.h>