stop_task.c 2.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * stop-task scheduling class.
  4. *
  5. * The stop task is the highest priority task in the system, it preempts
  6. * everything and will be preempted by nothing.
  7. *
  8. * See kernel/stop_machine.c
  9. */
  10. #ifdef CONFIG_SMP
/*
 * The stop task is pinned to its own CPU; wakeups always keep it where
 * it already is.
 */
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
/*
 * Nothing to pull for this class; just report whether the per-CPU stop
 * task is runnable so the core balance loop can stop at this class.
 */
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return sched_stop_runnable(rq);
}
  21. #endif /* CONFIG_SMP */
/*
 * Intentional no-op: nothing outranks the stop class (see the file
 * header), so a wakeup can never preempt a running stop task.
 */
static void
wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}
/* Stamp the start of this run so runtime can be accounted later. */
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
	stop->se.exec_start = rq_clock_task(rq);
}
  31. static struct task_struct *pick_task_stop(struct rq *rq)
  32. {
  33. if (!sched_stop_runnable(rq))
  34. return NULL;
  35. return rq->stop;
  36. }
/*
 * There is at most one stop task per rq and it is never throttled or
 * grouped, so enqueue reduces to bumping the runnable count.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}
/*
 * Mirror of enqueue_task_stop(); dequeue of a stop task cannot fail,
 * hence the unconditional true.
 */
static bool
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
	return true;
}
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
/* Fold the elapsed runtime (since set_next_task_stop()) into accounting. */
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	update_curr_common(rq);
}
  56. /*
  57. * scheduler tick hitting a task of our scheduling class.
  58. *
  59. * NOTE: This function can be called remotely by the tick offload that
  60. * goes along full dynticks. Therefore no local assumption can be made
  61. * and everything must be accessed through the @rq and @curr passed in
  62. * parameters.
  63. */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
	/* No time slicing for the stop class; nothing to do per tick. */
}
/*
 * Tasks are placed into the stop class only via sched_set_stop_task();
 * arriving here through the generic class-switch path is a kernel bug.
 */
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}
/*
 * The stop task has no adjustable priority; a priority change reaching
 * this class is a kernel bug.
 */
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!?, what priority? */
}
static void update_curr_stop(struct rq *rq)
{
	/* Accounting happens in put_prev_task_stop(); nothing to do here. */
}
  79. /*
  80. * Simple, special scheduling class for the per-CPU stop tasks:
  81. */
DEFINE_SCHED_CLASS(stop) = {

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,	/* BUG()s: never called */

	.wakeup_preempt		= wakeup_preempt_stop,

	.pick_task		= pick_task_stop,
	.put_prev_task		= put_prev_task_stop,
	.set_next_task		= set_next_task_stop,

#ifdef CONFIG_SMP
	.balance		= balance_stop,
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_stop,

	.prio_changed		= prio_changed_stop,	/* BUG()s: never called */
	.switched_to		= switched_to_stop,	/* BUG()s: never called */
	.update_curr		= update_curr_stop,
};