/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */
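
/*
 * Illustrative usage (a sketch, not part of this header): callers update
 * schedstat fields through the wrappers above so the stores disappear when
 * CONFIG_SCHEDSTATS is off and stay behind the static branch when it is on.
 * The helper name below is an assumption for the example only.
 *
 *	static void account_wait(struct sched_entity *se, u64 wait)
 *	{
 *		if (!schedstat_enabled())
 *			return;
 *
 *		__schedstat_inc(se->statistics.wait_count);
 *		__schedstat_add(se->statistics.wait_sum, wait);
 *		if (wait > schedstat_val(se->statistics.wait_max))
 *			__schedstat_set(se->statistics.wait_max, wait);
 *	}
 *
 * The __schedstat_*() variants skip the schedstat_enabled() test and suit
 * paths that have already checked it once, as above.
 */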

#ifdef CONFIG_SCHED_INFO

static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() so that the wait accumulated so far is
 * charged against the current CPU's rq->clock; taking each delta on a
 * single CPU keeps rq->clock skew between CPUs out of the sum.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
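
/*
 * Worked example of the skew argument above (illustrative numbers only):
 * a task is queued on CPU0 at rq_clock() == 1000 and dequeued there at
 * 1040, so 40 is folded into run_delay and last_queued is cleared.  It is
 * then queued on CPU1, whose clock happens to read 9000, and hits the CPU
 * at 9030, adding another 30.  The total of 70 is independent of the 8000
 * offset between the two clocks, because each delta was taken against a
 * single rq clock.
 */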

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on())) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}
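
/*
 * Illustrative timeline (assumed numbers): if the task arrived at
 * last_arrival == T and is switched out at rq_clock() == T + S, then S,
 * the time actually spent on the CPU, is what gets added to rq_cpu_time
 * above.  If the task was preempted while still TASK_RUNNING,
 * sched_info_queued() re-stamps last_queued at T + S, so its next wait is
 * measured from the moment it was switched out.
 */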

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */
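
/*
 * Rough sketch of how the scheduler core is expected to drive these hooks
 * (simplified; the exact call sites in kernel/sched/core.c vary by kernel
 * version):
 *
 *	enqueue_task(rq, p, flags)
 *		-> sched_info_queued(rq, p);	stamp last_queued once
 *	dequeue_task(rq, p, flags)
 *		-> sched_info_dequeued(rq, p);	fold the wait so far into run_delay
 *	prepare_task_switch(rq, prev, next)
 *		-> sched_info_switch(rq, prev, next);
 *		   prev departs (and is re-queued if still TASK_RUNNING),
 *		   next arrives and its accumulated wait is accounted.
 */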