/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Intel IOMMU trace support
 *
 * Copyright (C) 2019 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu

#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H

#include <linux/tracepoint.h>

#include "iommu.h"

/* Upper bound for the print-time buffer used by decode_prq_descriptor(). */
#define MSG_MAX		256
/*
 * qi_submit - trace one descriptor submitted to the invalidation queue.
 *
 * Records the four quadwords (qw0..qw3) of the QI descriptor plus the
 * name of the submitting IOMMU.  At print time the low 4 bits of qw0
 * are decoded symbolically into the descriptor type (cc_inv, iotlb_inv,
 * inv_wait, ...); the raw quadwords are printed alongside for full
 * post-mortem decoding.
 */
TRACE_EVENT(qi_submit,
	TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),

	TP_ARGS(iommu, qw0, qw1, qw2, qw3),

	TP_STRUCT__entry(
		__field(u64, qw0)
		__field(u64, qw1)
		__field(u64, qw2)
		__field(u64, qw3)
		__string(iommu, iommu->name)
	),

	TP_fast_assign(
		__assign_str(iommu);
		__entry->qw0 = qw0;
		__entry->qw1 = qw1;
		__entry->qw2 = qw2;
		__entry->qw3 = qw3;
	),

	TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
		  __print_symbolic(__entry->qw0 & 0xf,
				   { QI_CC_TYPE,	"cc_inv" },
				   { QI_IOTLB_TYPE,	"iotlb_inv" },
				   { QI_DIOTLB_TYPE,	"dev_tlb_inv" },
				   { QI_IEC_TYPE,	"iec_inv" },
				   { QI_IWD_TYPE,	"inv_wait" },
				   { QI_EIOTLB_TYPE,	"p_iotlb_inv" },
				   { QI_PC_TYPE,	"pc_inv" },
				   { QI_DEIOTLB_TYPE,	"p_dev_tlb_inv" },
				   { QI_PGRP_RESP_TYPE,	"page_grp_resp" }),
		  __get_str(iommu),
		  __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
	)
);
/*
 * prq_report - trace a page request queue (PRQ) descriptor as it is
 * reported to the handler.
 *
 * Captures the four raw doublewords of the page request descriptor, the
 * reporting IOMMU, the faulting device, and a sequence number so that
 * related events can be correlated.
 *
 * Note: the 'buff' dynamic array is deliberately NOT filled in
 * TP_fast_assign; decode_prq_descriptor() formats the human-readable
 * decoding into it lazily at TP_printk (output) time.
 */
TRACE_EVENT(prq_report,
	TP_PROTO(struct intel_iommu *iommu, struct device *dev,
		 u64 dw0, u64 dw1, u64 dw2, u64 dw3,
		 unsigned long seq),

	TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),

	TP_STRUCT__entry(
		__field(u64, dw0)
		__field(u64, dw1)
		__field(u64, dw2)
		__field(u64, dw3)
		__field(unsigned long, seq)
		__string(iommu, iommu->name)
		__string(dev, dev_name(dev))
		__dynamic_array(char, buff, MSG_MAX)
	),

	TP_fast_assign(
		__entry->dw0 = dw0;
		__entry->dw1 = dw1;
		__entry->dw2 = dw2;
		__entry->dw3 = dw3;
		__entry->seq = seq;
		__assign_str(iommu);
		__assign_str(dev);
	),

	TP_printk("%s/%s seq# %ld: %s",
		  __get_str(iommu), __get_str(dev), __entry->seq,
		  decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
					__entry->dw1, __entry->dw2, __entry->dw3)
	)
);
/*
 * cache_tag_log - event class for cache tag lifetime events.
 *
 * Shared template for tracepoints that log a struct cache_tag: the
 * owning IOMMU and device, the tag type (decoded symbolically), the
 * domain ID, the PASID, and the current reference (user) count.
 */
DECLARE_EVENT_CLASS(cache_tag_log,
	TP_PROTO(struct cache_tag *tag),

	TP_ARGS(tag),

	TP_STRUCT__entry(
		__string(iommu, tag->iommu->name)
		__string(dev, dev_name(tag->dev))
		__field(u16, type)
		__field(u16, domain_id)
		__field(u32, pasid)
		__field(u32, users)
	),

	TP_fast_assign(
		__assign_str(iommu);
		__assign_str(dev);
		__entry->type = tag->type;
		__entry->domain_id = tag->domain_id;
		__entry->pasid = tag->pasid;
		__entry->users = tag->users;
	),

	TP_printk("%s/%s type %s did %d pasid %d ref %d",
		  __get_str(iommu), __get_str(dev),
		  __print_symbolic(__entry->type,
				   { CACHE_TAG_IOTLB,		"iotlb" },
				   { CACHE_TAG_DEVTLB,		"devtlb" },
				   { CACHE_TAG_NESTING_IOTLB,	"nesting_iotlb" },
				   { CACHE_TAG_NESTING_DEVTLB,	"nesting_devtlb" }),
		  __entry->domain_id, __entry->pasid, __entry->users
	)
);
/* Fired when a cache tag is assigned (created or its refcount raised). */
DEFINE_EVENT(cache_tag_log, cache_tag_assign,
	TP_PROTO(struct cache_tag *tag),
	TP_ARGS(tag)
);
/* Fired when a cache tag is unassigned (refcount dropped or destroyed). */
DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
	TP_PROTO(struct cache_tag *tag),
	TP_ARGS(tag)
);
/* Fired when all cached translations for a tag are flushed. */
DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
	TP_PROTO(struct cache_tag *tag),
	TP_ARGS(tag)
);
/*
 * cache_tag_flush - event class for ranged cache tag flushes.
 *
 * Extends the cache_tag identification fields with the flush geometry:
 * the requested [start, end] address range, plus the addr/pages/mask
 * triple actually programmed into the invalidation hardware.
 */
DECLARE_EVENT_CLASS(cache_tag_flush,
	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
		 unsigned long addr, unsigned long pages, unsigned long mask),

	TP_ARGS(tag, start, end, addr, pages, mask),

	TP_STRUCT__entry(
		__string(iommu, tag->iommu->name)
		__string(dev, dev_name(tag->dev))
		__field(u16, type)
		__field(u16, domain_id)
		__field(u32, pasid)
		__field(unsigned long, start)
		__field(unsigned long, end)
		__field(unsigned long, addr)
		__field(unsigned long, pages)
		__field(unsigned long, mask)
	),

	TP_fast_assign(
		__assign_str(iommu);
		__assign_str(dev);
		__entry->type = tag->type;
		__entry->domain_id = tag->domain_id;
		__entry->pasid = tag->pasid;
		__entry->start = start;
		__entry->end = end;
		__entry->addr = addr;
		__entry->pages = pages;
		__entry->mask = mask;
	),

	TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
		  __get_str(iommu), __get_str(dev), __entry->pasid,
		  __print_symbolic(__entry->type,
				   { CACHE_TAG_IOTLB,		"iotlb" },
				   { CACHE_TAG_DEVTLB,		"devtlb" },
				   { CACHE_TAG_NESTING_IOTLB,	"nesting_iotlb" },
				   { CACHE_TAG_NESTING_DEVTLB,	"nesting_devtlb" }),
		  __entry->domain_id, __entry->start, __entry->end,
		  __entry->addr, __entry->pages, __entry->mask
	)
);
/* Fired for a ranged flush of cached translations. */
DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
		 unsigned long addr, unsigned long pages, unsigned long mask),
	TP_ARGS(tag, start, end, addr, pages, mask)
);
/* Fired for a ranged flush on a not-present (np) mapping update path. */
DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range_np,
	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
		 unsigned long addr, unsigned long pages, unsigned long mask),
	TP_ARGS(tag, start, end, addr, pages, mask)
);
#endif /* _TRACE_INTEL_IOMMU_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>