// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
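
/*
 * Link @new into the timestamp-ordered @oe->events list. The walk starts
 * from the most recently queued event (@oe->last), scanning forward or
 * backward depending on whether @new is newer or older than it.
 */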
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
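
/*
 * When oe->copy_on_queue is set, queued events are duplicated with memdup()
 * so the caller's buffer can be recycled; oe->cur_alloc_size tracks the
 * copied bytes against oe->max_alloc_size.
 */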
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
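
/*
 * Get a free struct ordered_event: reuse one from the cache of deleted
 * entries, take the next slot in the current preallocated buffer, or
 * allocate a fresh MAX_SAMPLE_BUFFER-sized buffer (chained on oe->to_free).
 * Returns NULL once the allocation limit is reached.
 */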
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/*
		 * First entry is abused to maintain the to_free list; clear
		 * its event pointer so ordered_events__free() does not pass
		 * uninitialized memory to free_dup_event().
		 */
		oe->buffer->event = NULL;
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/* Drop the duplicated event and let the caller flush and retry. */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}
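
/* Allocate an ordered_event for @event, stamp it and queue it in time order. */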
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}
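
/* Return @event's container to the cache and release any duplicated data. */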
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}
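
/*
 * Queue @event for time-ordered delivery. Events without a valid timestamp
 * are rejected, events older than the last flush are counted as unordered,
 * and an allocation failure triggers a HALF flush followed by one retry.
 */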
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}
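
/*
 * Deliver and delete every queued event with a timestamp up to
 * oe->next_flush, updating oe->last_flush as we go. A progress bar is
 * shown when the whole queue is being drained (final flush).
 */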
static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}
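
/*
 * Pick the flush limit according to @how: FINAL drains everything, HALF
 * flushes up to the midpoint between the oldest and newest queued
 * timestamps, and ROUND keeps the limit saved after the previous round.
 * After a successful ROUND flush, the current max_timestamp becomes the
 * limit for the next one.
 */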
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}
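
/*
 * Initialize the event lists and set the deliver callback; the allocation
 * limit defaults to unlimited ((u64) -1).
 */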
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver = deliver;
}
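
/* Release every preallocated buffer chained on the to_free list. */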
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}
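
/* Drop all buffered state and start over, keeping only the deliver callback. */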
void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	ordered_events__init(oe, old_deliver);
}