/* cpuacct.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * CPU accounting code for task groups.
  4. *
  5. * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
  6. * (balbir@in.ibm.com).
  7. */
  8. #include "sched.h"
  9. /* Time spent by the tasks of the CPU accounting group executing in ... */
  10. enum cpuacct_stat_index {
  11. CPUACCT_STAT_USER, /* ... user mode */
  12. CPUACCT_STAT_SYSTEM, /* ... kernel mode */
  13. CPUACCT_STAT_NSTATS,
  14. };
  15. static const char * const cpuacct_stat_desc[] = {
  16. [CPUACCT_STAT_USER] = "user",
  17. [CPUACCT_STAT_SYSTEM] = "system",
  18. };
  19. struct cpuacct_usage {
  20. u64 usages[CPUACCT_STAT_NSTATS];
  21. };
  22. /* track CPU usage of a group of tasks and its child groups */
  23. struct cpuacct {
  24. struct cgroup_subsys_state css;
  25. /* cpuusage holds pointer to a u64-type object on every CPU */
  26. struct cpuacct_usage __percpu *cpuusage;
  27. struct kernel_cpustat __percpu *cpustat;
  28. };
  29. static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
  30. {
  31. return css ? container_of(css, struct cpuacct, css) : NULL;
  32. }
  33. /* Return CPU accounting group to which this task belongs */
  34. static inline struct cpuacct *task_ca(struct task_struct *tsk)
  35. {
  36. return css_ca(task_css(tsk, cpuacct_cgrp_id));
  37. }
  38. static inline struct cpuacct *parent_ca(struct cpuacct *ca)
  39. {
  40. return css_ca(ca->css.parent);
  41. }
  42. static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
  43. static struct cpuacct root_cpuacct = {
  44. .cpustat = &kernel_cpustat,
  45. .cpuusage = &root_cpuacct_cpuusage,
  46. };
  47. /* Create a new CPU accounting group */
  48. static struct cgroup_subsys_state *
  49. cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
  50. {
  51. struct cpuacct *ca;
  52. if (!parent_css)
  53. return &root_cpuacct.css;
  54. ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  55. if (!ca)
  56. goto out;
  57. ca->cpuusage = alloc_percpu(struct cpuacct_usage);
  58. if (!ca->cpuusage)
  59. goto out_free_ca;
  60. ca->cpustat = alloc_percpu(struct kernel_cpustat);
  61. if (!ca->cpustat)
  62. goto out_free_cpuusage;
  63. return &ca->css;
  64. out_free_cpuusage:
  65. free_percpu(ca->cpuusage);
  66. out_free_ca:
  67. kfree(ca);
  68. out:
  69. return ERR_PTR(-ENOMEM);
  70. }
  71. /* Destroy an existing CPU accounting group */
  72. static void cpuacct_css_free(struct cgroup_subsys_state *css)
  73. {
  74. struct cpuacct *ca = css_ca(css);
  75. free_percpu(ca->cpustat);
  76. free_percpu(ca->cpuusage);
  77. kfree(ca);
  78. }
  79. static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
  80. enum cpuacct_stat_index index)
  81. {
  82. struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  83. u64 data;
  84. /*
  85. * We allow index == CPUACCT_STAT_NSTATS here to read
  86. * the sum of suages.
  87. */
  88. BUG_ON(index > CPUACCT_STAT_NSTATS);
  89. #ifndef CONFIG_64BIT
  90. /*
  91. * Take rq->lock to make 64-bit read safe on 32-bit platforms.
  92. */
  93. raw_spin_lock_irq(&cpu_rq(cpu)->lock);
  94. #endif
  95. if (index == CPUACCT_STAT_NSTATS) {
  96. int i = 0;
  97. data = 0;
  98. for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
  99. data += cpuusage->usages[i];
  100. } else {
  101. data = cpuusage->usages[index];
  102. }
  103. #ifndef CONFIG_64BIT
  104. raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
  105. #endif
  106. return data;
  107. }
  108. static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
  109. {
  110. struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  111. int i;
  112. #ifndef CONFIG_64BIT
  113. /*
  114. * Take rq->lock to make 64-bit write safe on 32-bit platforms.
  115. */
  116. raw_spin_lock_irq(&cpu_rq(cpu)->lock);
  117. #endif
  118. for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
  119. cpuusage->usages[i] = val;
  120. #ifndef CONFIG_64BIT
  121. raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
  122. #endif
  123. }
  124. /* Return total CPU usage (in nanoseconds) of a group */
  125. static u64 __cpuusage_read(struct cgroup_subsys_state *css,
  126. enum cpuacct_stat_index index)
  127. {
  128. struct cpuacct *ca = css_ca(css);
  129. u64 totalcpuusage = 0;
  130. int i;
  131. for_each_possible_cpu(i)
  132. totalcpuusage += cpuacct_cpuusage_read(ca, i, index);
  133. return totalcpuusage;
  134. }
  135. static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
  136. struct cftype *cft)
  137. {
  138. return __cpuusage_read(css, CPUACCT_STAT_USER);
  139. }
  140. static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
  141. struct cftype *cft)
  142. {
  143. return __cpuusage_read(css, CPUACCT_STAT_SYSTEM);
  144. }
  145. static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
  146. {
  147. return __cpuusage_read(css, CPUACCT_STAT_NSTATS);
  148. }
  149. static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
  150. u64 val)
  151. {
  152. struct cpuacct *ca = css_ca(css);
  153. int cpu;
  154. /*
  155. * Only allow '0' here to do a reset.
  156. */
  157. if (val)
  158. return -EINVAL;
  159. for_each_possible_cpu(cpu)
  160. cpuacct_cpuusage_write(ca, cpu, 0);
  161. return 0;
  162. }
  163. static int __cpuacct_percpu_seq_show(struct seq_file *m,
  164. enum cpuacct_stat_index index)
  165. {
  166. struct cpuacct *ca = css_ca(seq_css(m));
  167. u64 percpu;
  168. int i;
  169. for_each_possible_cpu(i) {
  170. percpu = cpuacct_cpuusage_read(ca, i, index);
  171. seq_printf(m, "%llu ", (unsigned long long) percpu);
  172. }
  173. seq_printf(m, "\n");
  174. return 0;
  175. }
  176. static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
  177. {
  178. return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_USER);
  179. }
  180. static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
  181. {
  182. return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_SYSTEM);
  183. }
  184. static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
  185. {
  186. return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_NSTATS);
  187. }
  188. static int cpuacct_all_seq_show(struct seq_file *m, void *V)
  189. {
  190. struct cpuacct *ca = css_ca(seq_css(m));
  191. int index;
  192. int cpu;
  193. seq_puts(m, "cpu");
  194. for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
  195. seq_printf(m, " %s", cpuacct_stat_desc[index]);
  196. seq_puts(m, "\n");
  197. for_each_possible_cpu(cpu) {
  198. struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  199. seq_printf(m, "%d", cpu);
  200. for (index = 0; index < CPUACCT_STAT_NSTATS; index++) {
  201. #ifndef CONFIG_64BIT
  202. /*
  203. * Take rq->lock to make 64-bit read safe on 32-bit
  204. * platforms.
  205. */
  206. raw_spin_lock_irq(&cpu_rq(cpu)->lock);
  207. #endif
  208. seq_printf(m, " %llu", cpuusage->usages[index]);
  209. #ifndef CONFIG_64BIT
  210. raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
  211. #endif
  212. }
  213. seq_puts(m, "\n");
  214. }
  215. return 0;
  216. }
  217. static int cpuacct_stats_show(struct seq_file *sf, void *v)
  218. {
  219. struct cpuacct *ca = css_ca(seq_css(sf));
  220. s64 val[CPUACCT_STAT_NSTATS];
  221. int cpu;
  222. int stat;
  223. memset(val, 0, sizeof(val));
  224. for_each_possible_cpu(cpu) {
  225. u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
  226. val[CPUACCT_STAT_USER] += cpustat[CPUTIME_USER];
  227. val[CPUACCT_STAT_USER] += cpustat[CPUTIME_NICE];
  228. val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SYSTEM];
  229. val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_IRQ];
  230. val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SOFTIRQ];
  231. }
  232. for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
  233. seq_printf(sf, "%s %lld\n",
  234. cpuacct_stat_desc[stat],
  235. (long long)nsec_to_clock_t(val[stat]));
  236. }
  237. return 0;
  238. }
  239. static struct cftype files[] = {
  240. {
  241. .name = "usage",
  242. .read_u64 = cpuusage_read,
  243. .write_u64 = cpuusage_write,
  244. },
  245. {
  246. .name = "usage_user",
  247. .read_u64 = cpuusage_user_read,
  248. },
  249. {
  250. .name = "usage_sys",
  251. .read_u64 = cpuusage_sys_read,
  252. },
  253. {
  254. .name = "usage_percpu",
  255. .seq_show = cpuacct_percpu_seq_show,
  256. },
  257. {
  258. .name = "usage_percpu_user",
  259. .seq_show = cpuacct_percpu_user_seq_show,
  260. },
  261. {
  262. .name = "usage_percpu_sys",
  263. .seq_show = cpuacct_percpu_sys_seq_show,
  264. },
  265. {
  266. .name = "usage_all",
  267. .seq_show = cpuacct_all_seq_show,
  268. },
  269. {
  270. .name = "stat",
  271. .seq_show = cpuacct_stats_show,
  272. },
  273. { } /* terminate */
  274. };
  275. /*
  276. * charge this task's execution time to its accounting group.
  277. *
  278. * called with rq->lock held.
  279. */
  280. void cpuacct_charge(struct task_struct *tsk, u64 cputime)
  281. {
  282. struct cpuacct *ca;
  283. int index = CPUACCT_STAT_SYSTEM;
  284. struct pt_regs *regs = task_pt_regs(tsk);
  285. if (regs && user_mode(regs))
  286. index = CPUACCT_STAT_USER;
  287. rcu_read_lock();
  288. for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
  289. this_cpu_ptr(ca->cpuusage)->usages[index] += cputime;
  290. rcu_read_unlock();
  291. }
  292. /*
  293. * Add user/system time to cpuacct.
  294. *
  295. * Note: it's the caller that updates the account of the root cgroup.
  296. */
  297. void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
  298. {
  299. struct cpuacct *ca;
  300. rcu_read_lock();
  301. for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
  302. this_cpu_ptr(ca->cpustat)->cpustat[index] += val;
  303. rcu_read_unlock();
  304. }
  305. struct cgroup_subsys cpuacct_cgrp_subsys = {
  306. .css_alloc = cpuacct_css_alloc,
  307. .css_free = cpuacct_css_free,
  308. .legacy_cftypes = files,
  309. .early_init = true,
  310. };