/* scx_central.bpf.c */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * A central FIFO sched_ext scheduler which demonstrates the followings:
  4. *
  5. * a. Making all scheduling decisions from one CPU:
  6. *
  7. * The central CPU is the only one making scheduling decisions. All other
  8. * CPUs kick the central CPU when they run out of tasks to run.
  9. *
  10. * There is one global BPF queue and the central CPU schedules all CPUs by
  11. * dispatching from the global queue to each CPU's local dsq from dispatch().
  12. * This isn't the most straightforward. e.g. It'd be easier to bounce
  13. * through per-CPU BPF queues. The current design is chosen to maximally
  14. * utilize and verify various SCX mechanisms such as LOCAL_ON dispatching.
  15. *
  16. * b. Tickless operation
  17. *
  18. * All tasks are dispatched with the infinite slice which allows stopping the
  19. * ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full
  20. * parameter. The tickless operation can be observed through
  21. * /proc/interrupts.
  22. *
  23. * Periodic switching is enforced by a periodic timer checking all CPUs and
  24. * preempting them as necessary. Unfortunately, BPF timer currently doesn't
  25. * have a way to pin to a specific CPU, so the periodic timer isn't pinned to
  26. * the central CPU.
  27. *
  28. * c. Preemption
  29. *
  30. * Kthreads are unconditionally queued to the head of a matching local dsq
  31. * and dispatched with SCX_DSQ_PREEMPT. This ensures that a kthread is always
  32. * prioritized over user threads, which is required for ensuring forward
  33. * progress as e.g. the periodic timer may run on a ksoftirqd and if the
  34. * ksoftirqd gets starved by a user thread, there may not be anything else to
  35. * vacate that user thread.
  36. *
  37. * SCX_KICK_PREEMPT is used to trigger scheduling and CPUs to move to the
  38. * next tasks.
  39. *
  40. * This scheduler is designed to maximize usage of various SCX mechanisms. A
  41. * more practical implementation would likely put the scheduling loop outside
  42. * the central CPU's dispatch() path and add some form of priority mechanism.
  43. *
  44. * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
  45. * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
  46. * Copyright (c) 2022 David Vernet <dvernet@meta.com>
  47. */
  48. #include <scx/common.bpf.h>
char _license[] SEC("license") = "GPL";

enum {
	FALLBACK_DSQ_ID = 0,		/* shared dsq for tasks that can't take the fast path */
	MS_TO_NS = 1000LLU * 1000,
	TIMER_INTERVAL_NS = 1 * MS_TO_NS, /* period of the slice-enforcement timer */
};

/* CPU that makes all scheduling decisions; set by userspace before load */
const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
const volatile u64 slice_ns = SCX_SLICE_DFL;

/* cleared by central_init() if BPF_F_TIMER_CPU_PIN turns out to be unsupported */
bool timer_pinned = true;

/* stat counters exported to userspace for reporting */
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);

/* shared FIFO of runnable pids which the central CPU drains in dispatch() */
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, s32);
} central_q SEC(".maps");

/* can't use percpu map due to bad lookups */
bool RESIZABLE_ARRAY(data, cpu_gimme_task);	/* per-CPU "needs a task" flags */
u64 RESIZABLE_ARRAY(data, cpu_started_at);	/* per-CPU slice start time, 0 == idle */

struct central_timer {
	struct bpf_timer timer;
};

/* single-entry array holding the periodic slice-enforcement timer */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct central_timer);
} central_timer SEC(".maps");
  80. static bool vtime_before(u64 a, u64 b)
  81. {
  82. return (s64)(a - b) < 0;
  83. }
  84. s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
  85. s32 prev_cpu, u64 wake_flags)
  86. {
  87. /*
  88. * Steer wakeups to the central CPU as much as possible to avoid
  89. * disturbing other CPUs. It's safe to blindly return the central cpu as
  90. * select_cpu() is a hint and if @p can't be on it, the kernel will
  91. * automatically pick a fallback CPU.
  92. */
  93. return central_cpu;
  94. }
/*
 * Queue @p for the central CPU to distribute, except for per-cpu kthreads
 * which take a preempting fast path into their local dsq.
 */
void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 pid = p->pid;

	__sync_fetch_and_add(&nr_total, 1);

	/*
	 * Push per-cpu kthreads at the head of local dsq's and preempt the
	 * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
	 * behind other threads which is necessary for forward progress
	 * guarantee as we depend on the BPF timer which may run from ksoftirqd.
	 */
	if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
		__sync_fetch_and_add(&nr_locals, 1);
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				 enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

	/*
	 * The queue holds pids, not task pointers; the dispatch side looks
	 * the task back up and copes with pids that no longer resolve (see
	 * dispatch_to_cpu()). On overflow, fall back to the shared dsq.
	 */
	if (bpf_map_push_elem(&central_q, &pid, 0)) {
		__sync_fetch_and_add(&nr_overflows, 1);
		scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
		return;
	}

	__sync_fetch_and_add(&nr_queued, 1);

	/* poke the central CPU so it notices the newly queued task */
	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}
/*
 * Pop pids off central_q until a task which may run on @cpu is found and
 * dispatch it to @cpu's local dsq. Tasks whose cpumask excludes @cpu are
 * bounced to the fallback dsq. Returns true if a task was dispatched to
 * @cpu, false if the queue (or the dispatch buffer) was exhausted first.
 */
static bool dispatch_to_cpu(s32 cpu)
{
	struct task_struct *p;
	s32 pid;

	bpf_repeat(BPF_MAX_LOOPS) {
		if (bpf_map_pop_elem(&central_q, &pid))
			break;

		__sync_fetch_and_sub(&nr_queued, 1);

		/* the task may have exited since its pid was queued */
		p = bpf_task_from_pid(pid);
		if (!p) {
			__sync_fetch_and_add(&nr_lost_pids, 1);
			continue;
		}

		/*
		 * If we can't run the task at the top, do the dumb thing and
		 * bounce it to the fallback dsq.
		 */
		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
			__sync_fetch_and_add(&nr_mismatches, 1);
			scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
			bpf_task_release(p);
			/*
			 * We might run out of dispatch buffer slots if we continue dispatching
			 * to the fallback DSQ, without dispatching to the local DSQ of the
			 * target CPU. In such a case, break the loop now as will fail the
			 * next dispatch operation.
			 */
			if (!scx_bpf_dispatch_nr_slots())
				break;
			continue;
		}

		/* dispatch to local and mark that @cpu doesn't need more */
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);
		if (cpu != central_cpu)
			scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

		bpf_task_release(p);
		return true;
	}

	return false;
}
/*
 * On the central CPU, dispatch for every CPU whose gimme flag is raised and
 * then find something for the central CPU itself. On any other CPU, try the
 * fallback dsq, raise the gimme flag, and kick the central CPU to do the
 * actual work.
 */
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == central_cpu) {
		/* dispatch for all other CPUs first */
		__sync_fetch_and_add(&nr_dispatches, 1);

		/* note: @cpu is reused as the loop variable below */
		bpf_for(cpu, 0, nr_cpu_ids) {
			bool *gimme;

			if (!scx_bpf_dispatch_nr_slots())
				break;

			/* central's gimme is never set */
			gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
			if (!gimme || !*gimme)
				continue;

			if (dispatch_to_cpu(cpu))
				*gimme = false;
		}

		/*
		 * Retry if we ran out of dispatch buffer slots as we might have
		 * skipped some CPUs and also need to dispatch for self. The ext
		 * core automatically retries if the local dsq is empty but we
		 * can't rely on that as we're dispatching for other CPUs too.
		 * Kick self explicitly to retry.
		 */
		if (!scx_bpf_dispatch_nr_slots()) {
			__sync_fetch_and_add(&nr_retries, 1);
			scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
			return;
		}

		/* look for a task to run on the central CPU */
		if (scx_bpf_consume(FALLBACK_DSQ_ID))
			return;
		dispatch_to_cpu(central_cpu);
	} else {
		bool *gimme;

		if (scx_bpf_consume(FALLBACK_DSQ_ID))
			return;

		gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
		if (gimme)
			*gimme = true;

		/*
		 * Force dispatch on the scheduling CPU so that it finds a task
		 * to run for us.
		 */
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
	}
}
  206. void BPF_STRUCT_OPS(central_running, struct task_struct *p)
  207. {
  208. s32 cpu = scx_bpf_task_cpu(p);
  209. u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
  210. if (started_at)
  211. *started_at = bpf_ktime_get_ns() ?: 1; /* 0 indicates idle */
  212. }
  213. void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
  214. {
  215. s32 cpu = scx_bpf_task_cpu(p);
  216. u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
  217. if (started_at)
  218. *started_at = 0;
  219. }
  220. static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
  221. {
  222. u64 now = bpf_ktime_get_ns();
  223. u64 nr_to_kick = nr_queued;
  224. s32 i, curr_cpu;
  225. curr_cpu = bpf_get_smp_processor_id();
  226. if (timer_pinned && (curr_cpu != central_cpu)) {
  227. scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
  228. curr_cpu, central_cpu);
  229. return 0;
  230. }
  231. bpf_for(i, 0, nr_cpu_ids) {
  232. s32 cpu = (nr_timers + i) % nr_cpu_ids;
  233. u64 *started_at;
  234. if (cpu == central_cpu)
  235. continue;
  236. /* kick iff the current one exhausted its slice */
  237. started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
  238. if (started_at && *started_at &&
  239. vtime_before(now, *started_at + slice_ns))
  240. continue;
  241. /* and there's something pending */
  242. if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
  243. scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
  244. ;
  245. else if (nr_to_kick)
  246. nr_to_kick--;
  247. else
  248. continue;
  249. scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
  250. }
  251. bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
  252. __sync_fetch_and_add(&nr_timers, 1);
  253. return 0;
  254. }
/*
 * Create the fallback dsq and arm the periodic slice-enforcement timer.
 * Must run on the central CPU so the CPU-pinned timer ends up pinned there.
 * Returns 0 on success, -errno on failure.
 */
int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
	u32 key = 0;
	struct bpf_timer *timer;
	int ret;

	ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
	if (ret)
		return ret;

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer)
		return -ESRCH;

	/* the PIN flag below pins to the current CPU - verify it's the central one */
	if (bpf_get_smp_processor_id() != central_cpu) {
		scx_bpf_error("init from non-central CPU");
		return -EINVAL;
	}

	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
	bpf_timer_set_callback(timer, central_timerfn);

	ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	/*
	 * BPF_F_TIMER_CPU_PIN is pretty new (>=6.7). If we're running in a
	 * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
	 * Retry without the PIN. This would be the perfect use case for
	 * bpf_core_enum_value_exists() but the enum type doesn't have a name
	 * and can't be used with bpf_core_enum_value_exists(). Oh well...
	 */
	if (ret == -EINVAL) {
		timer_pinned = false;
		ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
	}
	if (ret)
		scx_bpf_error("bpf_timer_start failed (%d)", ret);
	return ret;
}
/* Record exit info so userspace can report why the scheduler was unloaded. */
void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}
/* struct_ops table registering this scheduler's callbacks with sched_ext */
SCX_OPS_DEFINE(central_ops,
	       /*
		* We are offloading all scheduling decisions to the central CPU
		* and thus being the last task on a given CPU doesn't mean
		* anything special. Enqueue the last tasks like any other tasks.
		*/
	       .flags		= SCX_OPS_ENQ_LAST,
	       .select_cpu	= (void *)central_select_cpu,
	       .enqueue		= (void *)central_enqueue,
	       .dispatch	= (void *)central_dispatch,
	       .running		= (void *)central_running,
	       .stopping	= (void *)central_stopping,
	       .init		= (void *)central_init,
	       .exit		= (void *)central_exit,
	       .name		= "central");