  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * drivers/cpufreq/cpufreq_stats.c
  4. *
  5. * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
  6. * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
  7. */
  8. #include <linux/cpu.h>
  9. #include <linux/cpufreq.h>
  10. #include <linux/module.h>
  11. #include <linux/sched/clock.h>
  12. #include <linux/slab.h>
/*
 * Per-policy statistics bookkeeping, hung off policy->stats.
 *
 * time_in_state, freq_table and trans_table live in a single allocation
 * owned by time_in_state (see cpufreq_stats_create_table()).
 */
struct cpufreq_stats {
	unsigned int total_trans;	/* total number of frequency transitions */
	unsigned long long last_time;	/* local_clock() at the last update */
	unsigned int max_state;		/* allocated table size (valid entries) */
	unsigned int state_num;		/* number of distinct frequencies used */
	unsigned int last_index;	/* index of the current frequency */
	u64 *time_in_state;		/* ns spent at each frequency */
	unsigned int *freq_table;	/* frequency for each index */
	unsigned int *trans_table;	/* max_state x max_state transition counts */

	/* Deferred reset */
	unsigned int reset_pending;	/* reset requested, not yet applied */
	unsigned long long reset_time;	/* local_clock() when reset was requested */
};
  26. static void cpufreq_stats_update(struct cpufreq_stats *stats,
  27. unsigned long long time)
  28. {
  29. unsigned long long cur_time = local_clock();
  30. stats->time_in_state[stats->last_index] += cur_time - time;
  31. stats->last_time = cur_time;
  32. }
/*
 * Perform a deferred statistics reset.
 *
 * Runs from cpufreq_stats_record_transition() when reset_pending is set, so
 * the tables are zeroed in the transition path instead of directly from the
 * sysfs store, avoiding races with concurrent stats updates.
 */
static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
{
	unsigned int count = stats->max_state;

	memset(stats->time_in_state, 0, count * sizeof(u64));
	memset(stats->trans_table, 0, count * count * sizeof(int));
	stats->last_time = local_clock();
	stats->total_trans = 0;

	/* Adjust for the time elapsed since reset was requested */
	WRITE_ONCE(stats->reset_pending, 0);
	/*
	 * Prevent the reset_time read from being reordered before the
	 * reset_pending accesses in cpufreq_stats_record_transition().
	 */
	smp_rmb();
	cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
}
  49. static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
  50. {
  51. struct cpufreq_stats *stats = policy->stats;
  52. if (READ_ONCE(stats->reset_pending))
  53. return sprintf(buf, "%d\n", 0);
  54. else
  55. return sprintf(buf, "%u\n", stats->total_trans);
  56. }
  57. cpufreq_freq_attr_ro(total_trans);
  58. static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
  59. {
  60. struct cpufreq_stats *stats = policy->stats;
  61. bool pending = READ_ONCE(stats->reset_pending);
  62. unsigned long long time;
  63. ssize_t len = 0;
  64. int i;
  65. for (i = 0; i < stats->state_num; i++) {
  66. if (pending) {
  67. if (i == stats->last_index) {
  68. /*
  69. * Prevent the reset_time read from occurring
  70. * before the reset_pending read above.
  71. */
  72. smp_rmb();
  73. time = local_clock() - READ_ONCE(stats->reset_time);
  74. } else {
  75. time = 0;
  76. }
  77. } else {
  78. time = stats->time_in_state[i];
  79. if (i == stats->last_index)
  80. time += local_clock() - stats->last_time;
  81. }
  82. len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
  83. nsec_to_clock_t(time));
  84. }
  85. return len;
  86. }
  87. cpufreq_freq_attr_ro(time_in_state);
  88. /* We don't care what is written to the attribute */
/*
 * sysfs store for stats/reset: request a statistics reset.
 *
 * We don't care what is written to the attribute; any write triggers the
 * reset.  The actual zeroing is deferred to the transition path.
 */
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
			   size_t count)
{
	struct cpufreq_stats *stats = policy->stats;

	/*
	 * Defer resetting of stats to cpufreq_stats_record_transition() to
	 * avoid races.
	 */
	WRITE_ONCE(stats->reset_time, local_clock());
	/*
	 * The memory barrier below is to prevent the readers of reset_time from
	 * seeing a stale or partially updated value.
	 */
	smp_wmb();
	WRITE_ONCE(stats->reset_pending, 1);

	return count;
}
cpufreq_freq_attr_wo(reset);
/*
 * sysfs show for stats/trans_table: a state_num x state_num matrix of
 * transition counts, with a frequency header row and column.  While a
 * deferred reset is pending, every count reads as 0.
 *
 * sysfs_emit_at() never writes past PAGE_SIZE, so len saturates at
 * PAGE_SIZE - 1; each check below bails out early once that happens, and a
 * saturated result is reported as -EFBIG since a truncated table would be
 * misleading.
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	ssize_t len = 0;
	int i, j, count;

	len += sysfs_emit_at(buf, len, "   From  :    To\n");
	len += sysfs_emit_at(buf, len, "         : ");
	/* Header row: one column per known frequency. */
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE - 1)
			break;
		len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
	}
	if (len >= PAGE_SIZE - 1)
		return PAGE_SIZE - 1;

	len += sysfs_emit_at(buf, len, "\n");

	/* One row per source frequency, one cell per destination frequency. */
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE - 1)
			break;

		len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);

		for (j = 0; j < stats->state_num; j++) {
			if (len >= PAGE_SIZE - 1)
				break;

			if (pending)
				count = 0;
			else
				/* Row-major indexing into the flat table. */
				count = stats->trans_table[i * stats->max_state + j];

			len += sysfs_emit_at(buf, len, "%9u ", count);
		}
		if (len >= PAGE_SIZE - 1)
			break;
		len += sysfs_emit_at(buf, len, "\n");
	}

	if (len >= PAGE_SIZE - 1) {
		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
		return -EFBIG;
	}
	return len;
}
cpufreq_freq_attr_ro(trans_table);
/* Attributes exported under <policy>/stats/ in sysfs. */
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
	&reset.attr,
	&trans_table.attr,
	NULL
};
static const struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
  158. static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
  159. {
  160. int index;
  161. for (index = 0; index < stats->max_state; index++)
  162. if (stats->freq_table[index] == freq)
  163. return index;
  164. return -1;
  165. }
  166. void cpufreq_stats_free_table(struct cpufreq_policy *policy)
  167. {
  168. struct cpufreq_stats *stats = policy->stats;
  169. /* Already freed */
  170. if (!stats)
  171. return;
  172. pr_debug("%s: Free stats table\n", __func__);
  173. sysfs_remove_group(&policy->kobj, &stats_attr_group);
  174. kfree(stats->time_in_state);
  175. kfree(stats);
  176. policy->stats = NULL;
  177. }
/*
 * Allocate and initialize the stats for @policy and expose them via the
 * "stats" sysfs group.  Returns silently (leaving policy->stats NULL) on
 * any failure — stats are best-effort.
 */
void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
	unsigned int i = 0, count;
	struct cpufreq_stats *stats;
	unsigned int alloc_size;
	struct cpufreq_frequency_table *pos;

	count = cpufreq_table_count_valid_entries(policy);
	if (!count)
		return;

	/* stats already initialized */
	if (policy->stats)
		return;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return;

	/*
	 * Layout of the single allocation:
	 *   count u64   -> time_in_state
	 *   count int   -> freq_table
	 *   count*count -> trans_table (ints)
	 */
	alloc_size = count * sizeof(int) + count * sizeof(u64);

	alloc_size += count * count * sizeof(int);

	/* Allocate memory for time_in_state/freq_table/trans_table in one go */
	stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stats->time_in_state)
		goto free_stat;

	/* Carve freq_table and trans_table out of the same allocation. */
	stats->freq_table = (unsigned int *)(stats->time_in_state + count);

	stats->trans_table = stats->freq_table + count;

	stats->max_state = count;

	/* Find valid-unique entries */
	/* For a sorted table every valid entry is unique, so skip the lookup. */
	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		if (policy->freq_table_sorted != CPUFREQ_TABLE_UNSORTED ||
		    freq_table_get_index(stats, pos->frequency) == -1)
			stats->freq_table[i++] = pos->frequency;

	stats->state_num = i;
	stats->last_time = local_clock();
	/* May be -1 if policy->cur is not (yet) in the table. */
	stats->last_index = freq_table_get_index(stats, policy->cur);

	policy->stats = stats;
	if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
		return;

	/* We failed, release resources */
	policy->stats = NULL;
	kfree(stats->time_in_state);
free_stat:
	kfree(stats);
}
/*
 * Record a frequency transition of @policy to @new_freq: apply any pending
 * deferred reset first, then credit the elapsed time to the old frequency
 * and bump the old->new transition count.
 */
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq)
{
	struct cpufreq_stats *stats = policy->stats;
	int old_index, new_index;

	if (unlikely(!stats))
		return;

	/* Apply a deferred reset before reading last_index/last_time below. */
	if (unlikely(READ_ONCE(stats->reset_pending)))
		cpufreq_stats_reset_table(stats);

	old_index = stats->last_index;
	new_index = freq_table_get_index(stats, new_freq);

	/* We can't do stats->time_in_state[-1]= .. */
	if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
		return;

	cpufreq_stats_update(stats, stats->last_time);

	stats->last_index = new_index;
	stats->trans_table[old_index * stats->max_state + new_index]++;
	stats->total_trans++;
}