trace.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/kvm_csr.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoints for VM enters
 */
DECLARE_EVENT_CLASS(kvm_transition,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned long, pc)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->pc = vcpu->arch.pc;
	),

	TP_printk("vcpu %u PC: 0x%08lx", __entry->vcpu_id, __entry->pc)
);

DEFINE_EVENT(kvm_transition, kvm_enter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

DEFINE_EVENT(kvm_transition, kvm_reenter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

DEFINE_EVENT(kvm_transition, kvm_out,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));
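
/*
 * Each DEFINE_EVENT() above generates a trace_<name>() helper, e.g.
 * trace_kvm_enter(vcpu). As a rough usage sketch (caller placement is an
 * assumption, not dictated by this header), the vCPU run path would call
 * trace_kvm_enter()/trace_kvm_reenter() just before switching to guest
 * context and trace_kvm_out() after returning from it.
 */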
/* Further exit reasons */
#define KVM_TRACE_EXIT_IDLE	64
#define KVM_TRACE_EXIT_CACHE	65

/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types		\
	{ KVM_TRACE_EXIT_IDLE,  "IDLE" },	\
	{ KVM_TRACE_EXIT_CACHE, "CACHE" }
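
/*
 * kvm_trace_symbol_exit_types is consumed by __print_symbolic() below to
 * turn the numeric exit reason into a readable string in the trace output.
 */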
DECLARE_EVENT_CLASS(kvm_exit,
	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	TP_ARGS(vcpu, reason),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned long, pc)
		__field(unsigned int, reason)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->pc = vcpu->arch.pc;
		__entry->reason = reason;
	),

	TP_printk("vcpu %u [%s] PC: 0x%08lx",
		  __entry->vcpu_id,
		  __print_symbolic(__entry->reason,
				   kvm_trace_symbol_exit_types),
		  __entry->pc)
);

DEFINE_EVENT(kvm_exit, kvm_exit_idle,
	     TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	     TP_ARGS(vcpu, reason));

DEFINE_EVENT(kvm_exit, kvm_exit_cache,
	     TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	     TP_ARGS(vcpu, reason));

DEFINE_EVENT(kvm_exit, kvm_exit,
	     TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	     TP_ARGS(vcpu, reason));
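
/*
 * kvm_exit_gspr records the raw instruction word of a guest instruction
 * whose access to a sensitive privileged resource is being emulated by
 * KVM (the GSPR exit path); the exact call sites live in the exit
 * handling code, not in this header.
 */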
TRACE_EVENT(kvm_exit_gspr,
	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
	TP_ARGS(vcpu, inst_word),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned int, inst_word)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->inst_word = inst_word;
	),

	TP_printk("vcpu %u Inst word: 0x%08x", __entry->vcpu_id,
		  __entry->inst_word)
);
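
/*
 * Auxiliary (FPU/SIMD) context tracing: the first group of constants
 * encodes the operation performed, the second the hardware unit it applies
 * to (scalar FPU, 128-bit LSX or 256-bit LASX vector extensions).
 */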
#define KVM_TRACE_AUX_SAVE	0
#define KVM_TRACE_AUX_RESTORE	1
#define KVM_TRACE_AUX_ENABLE	2
#define KVM_TRACE_AUX_DISABLE	3
#define KVM_TRACE_AUX_DISCARD	4

#define KVM_TRACE_AUX_FPU	1
#define KVM_TRACE_AUX_LSX	2
#define KVM_TRACE_AUX_LASX	3

#define kvm_trace_symbol_aux_op			\
	{ KVM_TRACE_AUX_SAVE,    "save" },	\
	{ KVM_TRACE_AUX_RESTORE, "restore" },	\
	{ KVM_TRACE_AUX_ENABLE,  "enable" },	\
	{ KVM_TRACE_AUX_DISABLE, "disable" },	\
	{ KVM_TRACE_AUX_DISCARD, "discard" }

#define kvm_trace_symbol_aux_state		\
	{ KVM_TRACE_AUX_FPU,  "FPU" },		\
	{ KVM_TRACE_AUX_LSX,  "LSX" },		\
	{ KVM_TRACE_AUX_LASX, "LASX" }
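
/*
 * kvm_aux pairs one operation with one unit, e.g. a "restore FPU" event
 * when guest FPU state is reloaded on first use; the lazy save/restore
 * policy itself lives in the vCPU code, not in this header.
 */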
TRACE_EVENT(kvm_aux,
	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
		 unsigned int state),
	TP_ARGS(vcpu, op, state),
	TP_STRUCT__entry(
		__field(unsigned long, pc)
		__field(u8, op)
		__field(u8, state)
	),

	TP_fast_assign(
		__entry->pc = vcpu->arch.pc;
		__entry->op = op;
		__entry->state = state;
	),

	TP_printk("%s %s PC: 0x%08lx",
		  __print_symbolic(__entry->op,
				   kvm_trace_symbol_aux_op),
		  __print_symbolic(__entry->state,
				   kvm_trace_symbol_aux_state),
		  __entry->pc)
);
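
/*
 * kvm_vpid_change fires when a vCPU is assigned a new VPID, the identifier
 * used to tag its guest TLB entries (typically on a VPID-generation
 * rollover or after migrating to another CPU; the exact trigger is in the
 * vCPU load path, which is an assumption here).
 */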
TRACE_EVENT(kvm_vpid_change,
	TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid),
	TP_ARGS(vcpu, vpid),
	TP_STRUCT__entry(
		__field(unsigned long, vpid)
	),

	TP_fast_assign(
		__entry->vpid = vpid;
	),

	TP_printk("VPID: 0x%08lx", __entry->vpid)
);

#endif /* _TRACE_KVM_H */
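
/*
 * Tell the tracepoint framework where to find this header when
 * <trace/define_trace.h> re-includes it to emit the event definitions:
 * TRACE_INCLUDE_PATH is relative to include/trace/ and TRACE_INCLUDE_FILE
 * is the file name without the .h suffix.
 */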
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>