/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>
#include <libgen.h>
#include <bpf/bpf.h>
#include <scx/common.h>
#include "scx_qmap.bpf.skel.h"
  16. const char help_fmt[] =
  17. "A simple five-level FIFO queue sched_ext scheduler.\n"
  18. "\n"
  19. "See the top-level comment in .bpf.c for more details.\n"
  20. "\n"
  21. "Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-b COUNT]\n"
  22. " [-P] [-d PID] [-D LEN] [-p] [-v]\n"
  23. "\n"
  24. " -s SLICE_US Override slice duration\n"
  25. " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n"
  26. " -t COUNT Stall every COUNT'th user thread\n"
  27. " -T COUNT Stall every COUNT'th kernel thread\n"
  28. " -l COUNT Trigger dispatch infinite looping after COUNT dispatches\n"
  29. " -b COUNT Dispatch upto COUNT tasks together\n"
  30. " -P Print out DSQ content to trace_pipe every second, use with -b\n"
  31. " -H Boost nice -20 tasks in SHARED_DSQ, use with -b\n"
  32. " -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n"
  33. " -D LEN Set scx_exit_info.dump buffer length\n"
  34. " -S Suppress qmap-specific debug dump\n"
  35. " -p Switch only tasks on SCHED_EXT policy instead of all\n"
  36. " -v Print libbpf debug messages\n"
  37. " -h Display this help and exit\n";
  38. static bool verbose;
  39. static volatile int exit_req;
  40. static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
  41. {
  42. if (level == LIBBPF_DEBUG && !verbose)
  43. return 0;
  44. return vfprintf(stderr, format, args);
  45. }
  46. static void sigint_handler(int dummy)
  47. {
  48. exit_req = 1;
  49. }
  50. int main(int argc, char **argv)
  51. {
  52. struct scx_qmap *skel;
  53. struct bpf_link *link;
  54. int opt;
  55. libbpf_set_print(libbpf_print_fn);
  56. signal(SIGINT, sigint_handler);
  57. signal(SIGTERM, sigint_handler);
  58. skel = SCX_OPS_OPEN(qmap_ops, scx_qmap);
  59. while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) {
  60. switch (opt) {
  61. case 's':
  62. skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
  63. break;
  64. case 'e':
  65. skel->bss->test_error_cnt = strtoul(optarg, NULL, 0);
  66. break;
  67. case 't':
  68. skel->rodata->stall_user_nth = strtoul(optarg, NULL, 0);
  69. break;
  70. case 'T':
  71. skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0);
  72. break;
  73. case 'l':
  74. skel->rodata->dsp_inf_loop_after = strtoul(optarg, NULL, 0);
  75. break;
  76. case 'b':
  77. skel->rodata->dsp_batch = strtoul(optarg, NULL, 0);
  78. break;
  79. case 'P':
  80. skel->rodata->print_shared_dsq = true;
  81. break;
  82. case 'H':
  83. skel->rodata->highpri_boosting = true;
  84. break;
  85. case 'd':
  86. skel->rodata->disallow_tgid = strtol(optarg, NULL, 0);
  87. if (skel->rodata->disallow_tgid < 0)
  88. skel->rodata->disallow_tgid = getpid();
  89. break;
  90. case 'D':
  91. skel->struct_ops.qmap_ops->exit_dump_len = strtoul(optarg, NULL, 0);
  92. break;
  93. case 'S':
  94. skel->rodata->suppress_dump = true;
  95. break;
  96. case 'p':
  97. skel->struct_ops.qmap_ops->flags |= SCX_OPS_SWITCH_PARTIAL;
  98. break;
  99. case 'v':
  100. verbose = true;
  101. break;
  102. default:
  103. fprintf(stderr, help_fmt, basename(argv[0]));
  104. return opt != 'h';
  105. }
  106. }
  107. SCX_OPS_LOAD(skel, qmap_ops, scx_qmap, uei);
  108. link = SCX_OPS_ATTACH(skel, qmap_ops, scx_qmap);
  109. while (!exit_req && !UEI_EXITED(skel, uei)) {
  110. long nr_enqueued = skel->bss->nr_enqueued;
  111. long nr_dispatched = skel->bss->nr_dispatched;
  112. printf("stats : enq=%lu dsp=%lu delta=%ld reenq=%"PRIu64" deq=%"PRIu64" core=%"PRIu64" enq_ddsp=%"PRIu64"\n",
  113. nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched,
  114. skel->bss->nr_reenqueued, skel->bss->nr_dequeued,
  115. skel->bss->nr_core_sched_execed,
  116. skel->bss->nr_ddsp_from_enq);
  117. printf(" exp_local=%"PRIu64" exp_remote=%"PRIu64" exp_timer=%"PRIu64" exp_lost=%"PRIu64"\n",
  118. skel->bss->nr_expedited_local,
  119. skel->bss->nr_expedited_remote,
  120. skel->bss->nr_expedited_from_timer,
  121. skel->bss->nr_expedited_lost);
  122. if (__COMPAT_has_ksym("scx_bpf_cpuperf_cur"))
  123. printf("cpuperf: cur min/avg/max=%u/%u/%u target min/avg/max=%u/%u/%u\n",
  124. skel->bss->cpuperf_min,
  125. skel->bss->cpuperf_avg,
  126. skel->bss->cpuperf_max,
  127. skel->bss->cpuperf_target_min,
  128. skel->bss->cpuperf_target_avg,
  129. skel->bss->cpuperf_target_max);
  130. fflush(stdout);
  131. sleep(1);
  132. }
  133. bpf_link__destroy(link);
  134. UEI_REPORT(skel, uei);
  135. scx_qmap__destroy(skel);
  136. /*
  137. * scx_qmap implements ops.cpu_on/offline() and doesn't need to restart
  138. * on CPU hotplug events.
  139. */
  140. return 0;
  141. }