ring_buffer_benchmark.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>
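
/*
 * Local mirror of the ring buffer's sub-buffer (page) header layout:
 * a timestamp, a commit counter, and the event data filling the rest
 * of the page. read_page() uses it to walk raw events by hand.
 */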
struct rb_page {
        u64             ts;
        local_t         commit;
        char            data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME        10ULL
#define SLEEP_TIME      10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct trace_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo;
static int consumer_fifo;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo");

static int read_events;

static int test_error;
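
/*
 * Latch only the first failure and fire a single WARN_ON() for it;
 * later checks see test_error set and wind the test down.
 */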
#define TEST_ERROR()                            \
        do {                                    \
                if (!test_error) {              \
                        test_error = 1;         \
                        WARN_ON(1);             \
                }                               \
        } while (0)

enum event_status {
        EVENT_FOUND,
        EVENT_DROPPED,
};

static bool break_test(void)
{
        return test_error || kthread_should_stop();
}

static enum event_status read_event(int cpu)
{
        struct ring_buffer_event *event;
        int *entry;
        u64 ts;

        event = ring_buffer_consume(buffer, cpu, &ts, NULL);
        if (!event)
                return EVENT_DROPPED;

        entry = ring_buffer_event_data(event);
        if (*entry != cpu) {
                TEST_ERROR();
                return EVENT_DROPPED;
        }

        read++;
        return EVENT_FOUND;
}
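
/*
 * read_page() pulls a whole sub-buffer via ring_buffer_read_page()
 * and decodes the raw events itself. The event size comes from the
 * type_len field of the event header: padding and "length in
 * array[0]" events advance by array[0] + 4 bytes (payload plus the
 * 4-byte header), time extends advance by 8, and ordinary data
 * events by (type_len + 1) * 4.
 */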
static enum event_status read_page(int cpu)
{
        struct buffer_data_read_page *bpage;
        struct ring_buffer_event *event;
        struct rb_page *rpage;
        unsigned long commit;
        int page_size;
        int *entry;
        int ret;
        int inc;
        int i;

        bpage = ring_buffer_alloc_read_page(buffer, cpu);
        if (IS_ERR(bpage))
                return EVENT_DROPPED;

        page_size = ring_buffer_subbuf_size_get(buffer);
        ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1);
        if (ret >= 0) {
                rpage = ring_buffer_read_page_data(bpage);
                /* The commit may have missed event flags set, clear them */
                commit = local_read(&rpage->commit) & 0xfffff;
                for (i = 0; i < commit && !test_error; i += inc) {

                        if (i >= (page_size - offsetof(struct rb_page, data))) {
                                TEST_ERROR();
                                break;
                        }

                        inc = -1;
                        event = (void *)&rpage->data[i];
                        switch (event->type_len) {
                        case RINGBUF_TYPE_PADDING:
                                /* failed writes may be discarded events */
                                if (!event->time_delta)
                                        TEST_ERROR();
                                inc = event->array[0] + 4;
                                break;
                        case RINGBUF_TYPE_TIME_EXTEND:
                                inc = 8;
                                break;
                        case 0:
                                entry = ring_buffer_event_data(event);
                                if (*entry != cpu) {
                                        TEST_ERROR();
                                        break;
                                }
                                read++;
                                if (!event->array[0]) {
                                        TEST_ERROR();
                                        break;
                                }
                                inc = event->array[0] + 4;
                                break;
                        default:
                                entry = ring_buffer_event_data(event);
                                if (*entry != cpu) {
                                        TEST_ERROR();
                                        break;
                                }
                                read++;
                                inc = ((event->type_len + 1) * 4);
                        }
                        if (test_error)
                                break;

                        if (inc <= 0) {
                                TEST_ERROR();
                                break;
                        }
                }
        }
        ring_buffer_free_read_page(buffer, cpu, bpage);

        if (ret < 0)
                return EVENT_DROPPED;
        return EVENT_FOUND;
}

static void ring_buffer_consumer(void)
{
        /* toggle between reading pages and events */
        read_events ^= 1;

        read = 0;
        /*
         * Continue running until the producer specifically asks to stop
         * and is ready for the completion.
         */
        while (!READ_ONCE(reader_finish)) {
                int found = 1;

                while (found && !test_error) {
                        int cpu;

                        found = 0;
                        for_each_online_cpu(cpu) {
                                enum event_status stat;

                                if (read_events)
                                        stat = read_event(cpu);
                                else
                                        stat = read_page(cpu);

                                if (test_error)
                                        break;

                                if (stat == EVENT_FOUND)
                                        found = 1;
                        }
                }

                /* Wait till the producer wakes us up when there is more data
                 * available or when the producer wants us to finish reading.
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                if (reader_finish)
                        break;

                schedule();
        }
        __set_current_state(TASK_RUNNING);
        reader_finish = 0;
        complete(&read_done);
}
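
/*
 * The producer hammers the buffer for RUN_TIME seconds, counting
 * reserves that succeeded (hit) and failed (missed), and periodically
 * kicking the consumer. When the run ends it re-inits both
 * completions, sets reader_finish, and waits on read_done so the
 * reader drains before the statistics are reported via trace_printk().
 */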
static void ring_buffer_producer(void)
{
        ktime_t start_time, end_time, timeout;
        unsigned long long time;
        unsigned long long entries;
        unsigned long long overruns;
        unsigned long missed = 0;
        unsigned long hit = 0;
        unsigned long avg;
        int cnt = 0;

        /*
         * Hammer the buffer for 10 secs (this may
         * make the system stall)
         */
        trace_printk("Starting ring buffer hammer\n");
        start_time = ktime_get();
        timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
        do {
                struct ring_buffer_event *event;
                int *entry;
                int i;

                for (i = 0; i < write_iteration; i++) {
                        event = ring_buffer_lock_reserve(buffer, 10);
                        if (!event) {
                                missed++;
                        } else {
                                hit++;
                                entry = ring_buffer_event_data(event);
                                *entry = smp_processor_id();
                                ring_buffer_unlock_commit(buffer);
                        }
                }
                end_time = ktime_get();

                cnt++;
                if (consumer && !(cnt % wakeup_interval))
                        wake_up_process(consumer);

#ifndef CONFIG_PREEMPTION
                /*
                 * If we are a non preempt kernel, the 10 second run will
                 * stop everything while it runs. Instead, we will call
                 * cond_resched and also add any time that was lost by a
                 * reschedule.
                 *
                 * Do a cond resched at the same frequency we would wake up
                 * the reader.
                 */
                if (cnt % wakeup_interval)
                        cond_resched();
#endif
        } while (ktime_before(end_time, timeout) && !break_test());
        trace_printk("End ring buffer hammer\n");

        if (consumer) {
                /* Init both completions here to avoid races */
                init_completion(&read_start);
                init_completion(&read_done);
                /* the completions must be visible before the finish var */
                smp_wmb();
                reader_finish = 1;
                wake_up_process(consumer);
                wait_for_completion(&read_done);
        }

        time = ktime_us_delta(end_time, start_time);

        entries = ring_buffer_entries(buffer);
        overruns = ring_buffer_overruns(buffer);

        if (test_error)
                trace_printk("ERROR!\n");

        if (!disable_reader) {
                if (consumer_fifo)
                        trace_printk("Running Consumer at SCHED_FIFO %s\n",
                                     consumer_fifo == 1 ? "low" : "high");
                else
                        trace_printk("Running Consumer at nice: %d\n",
                                     consumer_nice);
        }
        if (producer_fifo)
                trace_printk("Running Producer at SCHED_FIFO %s\n",
                             producer_fifo == 1 ? "low" : "high");
        else
                trace_printk("Running Producer at nice: %d\n",
                             producer_nice);

        /* Let the user know that the test is running at low priority */
        if (!producer_fifo && !consumer_fifo &&
            producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
                trace_printk("WARNING!!! This test is running at lowest priority.\n");

        trace_printk("Time:     %lld (usecs)\n", time);
        trace_printk("Overruns: %lld\n", overruns);
        if (disable_reader)
                trace_printk("Read:     (reader disabled)\n");
        else
                trace_printk("Read:     %ld (by %s)\n", read,
                             read_events ? "events" : "pages");
        trace_printk("Entries:  %lld\n", entries);
        trace_printk("Total:    %lld\n", entries + overruns + read);
        trace_printk("Missed:   %ld\n", missed);
        trace_printk("Hit:      %ld\n", hit);

        /* Convert time from usecs to millisecs */
        do_div(time, USEC_PER_MSEC);
        if (time)
                hit /= (long)time;
        else
                trace_printk("TIME IS ZERO??\n");

        trace_printk("Entries per millisec: %ld\n", hit);

        if (hit) {
                /* Calculate the average time in nanosecs */
                avg = NSEC_PER_MSEC / hit;
                trace_printk("%ld ns per entry\n", avg);
        }
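
        /*
         * Worked example with illustrative (not measured) numbers:
         * hit = 10,000,000 entries over a 10,000 ms run gives
         * hit /= time -> 1000 entries per millisec, and
         * avg = NSEC_PER_MSEC / 1000 = 1000 ns per entry.
         */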
        if (missed) {
                if (time)
                        missed /= (long)time;

                trace_printk("Total iterations per millisec: %ld\n",
                             hit + missed);

                /* it is possible that hit + missed will overflow and be zero */
                if (!(hit + missed)) {
                        trace_printk("hit + missed overflowed and totalled zero!\n");
                        hit--; /* make it non zero */
                }

                /* Calculate the average time in nanosecs */
                avg = NSEC_PER_MSEC / (hit + missed);
                trace_printk("%ld ns per entry\n", avg);
        }
}

static void wait_to_die(void)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
}

static int ring_buffer_consumer_thread(void *arg)
{
        while (!break_test()) {
                complete(&read_start);

                ring_buffer_consumer();

                set_current_state(TASK_INTERRUPTIBLE);
                if (break_test())
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!kthread_should_stop())
                wait_to_die();

        return 0;
}

static int ring_buffer_producer_thread(void *arg)
{
        while (!break_test()) {
                ring_buffer_reset(buffer);

                if (consumer) {
                        wake_up_process(consumer);
                        wait_for_completion(&read_start);
                }

                ring_buffer_producer();
                if (break_test())
                        goto out_kill;

                trace_printk("Sleeping for 10 secs\n");
                set_current_state(TASK_INTERRUPTIBLE);
                if (break_test())
                        goto out_kill;
                schedule_timeout(HZ * SLEEP_TIME);
        }

out_kill:
        __set_current_state(TASK_RUNNING);
        if (!kthread_should_stop())
                wait_to_die();

        return 0;
}
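
/*
 * Note the asymmetry below: the consumer is created with
 * kthread_create() and left sleeping, while the producer is started
 * immediately with kthread_run(); the producer's loop is what first
 * wakes the consumer.
 */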
static int __init ring_buffer_benchmark_init(void)
{
        int ret;

        /* make a one meg buffer in overwrite mode */
        buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
        if (!buffer)
                return -ENOMEM;

        if (!disable_reader) {
                consumer = kthread_create(ring_buffer_consumer_thread,
                                          NULL, "rb_consumer");
                ret = PTR_ERR(consumer);
                if (IS_ERR(consumer))
                        goto out_fail;
        }

        producer = kthread_run(ring_buffer_producer_thread,
                               NULL, "rb_producer");
        ret = PTR_ERR(producer);

        if (IS_ERR(producer))
                goto out_kill;

        /*
         * Run them as low-prio background tasks by default:
         */
        if (!disable_reader) {
                if (consumer_fifo >= 2)
                        sched_set_fifo(consumer);
                else if (consumer_fifo == 1)
                        sched_set_fifo_low(consumer);
                else
                        set_user_nice(consumer, consumer_nice);
        }

        if (producer_fifo >= 2)
                sched_set_fifo(producer);
        else if (producer_fifo == 1)
                sched_set_fifo_low(producer);
        else
                set_user_nice(producer, producer_nice);

        return 0;

out_kill:
        if (consumer)
                kthread_stop(consumer);

out_fail:
        ring_buffer_free(buffer);
        return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
        kthread_stop(producer);
        if (consumer)
                kthread_stop(consumer);
        ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");
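
/*
 * Example usage (a sketch; the module name assumes the usual in-tree
 * build producing ring_buffer_benchmark.ko):
 *
 *   # insmod ring_buffer_benchmark.ko producer_fifo=1 write_iteration=100
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * All results go through trace_printk(), so they land in the ftrace
 * ring buffer rather than the kernel log.
 */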