event_analyzing_sample.py

# event_analyzing_sample.py: general event handler in python
# SPDX-License-Identifier: GPL-2.0
#
# The current perf report is already very powerful with annotation integrated,
# and this script is not trying to be as powerful as perf report; instead it
# gives end users and developers a flexible way to analyze events other than
# tracepoints.
#
# The two database-related functions in this script just show how to gather
# the basic information; users can modify them and write their own functions
# according to their specific requirements.
#
# The first function, "show_general_events", does a basic grouping of all
# generic events with the help of sqlite, and the second one, "show_pebs_ll",
# is for an x86 HW PMU event: PEBS with load latency data.
#
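#
# A minimal usage sketch (the event and options below are only examples and
# depend on the local setup; "perf script" is expected to set PERF_EXEC_PATH
# for the imports that follow):
#
#   perf record -e cycles -a -- sleep 1
#   perf script -s event_analyzing_sample.py
#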
import os
import sys
import math
import struct
import sqlite3

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from EventClass import *

#
# If perf.data contains a large number of samples, the insert operations
# become very time consuming (10+ minutes for 10000 samples) when the .db
# file lives on disk. Moving the .db file to a RAM-based filesystem speeds
# up the handling and cuts the time down to a few seconds.
#
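#
# A possible alternative (a sketch, not what this script does): keep the
# database entirely in memory, which avoids the filesystem altogether but
# also means nothing is left behind to inspect after the run:
#
#     con = sqlite3.connect(":memory:")
#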
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None

def trace_begin():
    print "In trace_begin:\n"

    #
    # Create the tables at the start: pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general events.
    #
    con.execute("""
        create table if not exists gen_events (
            name text,
            symbol text,
            comm text,
            dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
            name text,
            symbol text,
            comm text,
            dso text,
            flags integer,
            ip integer,
            status integer,
            dse integer,
            dla integer,
            lat integer
        );""")
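
#
# If the group-by queries in show_pebs_ll() ever get slow on very large data
# sets, an index on the grouped column can help; a sketch only (this script
# itself does not create any indexes):
#
#     con.execute("create index if not exists pebs_ll_lat_idx on pebs_ll (lat)")
#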
#
# Create an event object and insert it into the database so that the user
# can do more analysis with simple database commands.
#
def process_event(param_dict):
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # Symbol and dso info are not always resolved
    if (param_dict.has_key("dso")):
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"

    if (param_dict.has_key("symbol")):
        symbol = param_dict["symbol"]
    else:
        symbol = "Unknown_symbol"

    # Create the event object and insert it into the right table of the database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)

def insert_db(event):
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso, event.flags,
                     event.ip, event.status, event.dse, event.dla, event.lat))

def trace_end():
    print "In trace_end:\n"
    # Show the basic info for the two types of event classes
    show_general_events()
    show_pebs_ll()
    con.close()

#
# As the event counts can be very large, the histogram is not drawn on a
# linear scale of the raw numbers but on a log2 scale instead.
#
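#
# A quick illustration of the formula below: a count of 8 maps to
# int(math.log(8, 2)) + 1 = 4 '#' characters, and a count of 1000 maps to
# int(math.log(1000, 2)) + 1 = 10.
#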
def num2sym(num):
    # Each number gets at least one '#'
    snum = '#' * int(math.log(num, 2) + 1)
    return snum

def show_general_events():
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There are %d records in the gen_events table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the general events grouped by thread/symbol/dso: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))

#
# This function just shows the basic info; more could be done with the
# data in the tables, like checking the function parameters when some
# big-latency events happen.
#
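#
# For instance (a sketch of such an ad-hoc query; the threshold of 100 is
# only an example), the samples with the largest load latency could be
# listed directly:
#
#     select comm, symbol, dso, lat from pebs_ll
#            where lat > 100 order by lat desc limit 10;
#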
def show_pebs_ll():
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There are %d records in the pebs_ll table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by latency
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

def trace_unhandled(event_name, context, event_fields_dict):
    print ' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())])
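
#
# The database is left behind in /dev/shm/perf.db after the run, so further
# ad-hoc analysis can be done from the sqlite3 shell, for example (a sketch):
#
#   sqlite3 /dev/shm/perf.db "select dso, count(*) from gen_events group by dso"
#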