wakelock.c 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * kernel/power/wakelock.c
  4. *
  5. * User space wakeup sources support.
  6. *
  7. * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
  8. *
  9. * This code is based on the analogous interface allowing user space to
  10. * manipulate wakelocks on Android.
  11. */
  12. #include <linux/capability.h>
  13. #include <linux/ctype.h>
  14. #include <linux/device.h>
  15. #include <linux/err.h>
  16. #include <linux/hrtimer.h>
  17. #include <linux/list.h>
  18. #include <linux/rbtree.h>
  19. #include <linux/slab.h>
  20. #include <linux/workqueue.h>
  21. #include "power.h"
/* Serializes all accesses to wakelocks_tree and the wakelock objects. */
static DEFINE_MUTEX(wakelocks_lock);

/*
 * One user-space wakelock: a named wakeup source kept in an rbtree keyed
 * by name and, when garbage collection is enabled, on an LRU list used
 * to reclaim long-idle entries.
 */
struct wakelock {
	char *name;		/* kstrndup()'ed key; owned by this struct */
	struct rb_node node;	/* link in wakelocks_tree */
	struct wakeup_source *ws;	/* backing wakeup source */
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head lru;	/* link in wakelocks_lru_list (MRU at head) */
#endif
};

/* All known user-space wakelocks; protected by wakelocks_lock. */
static struct rb_root wakelocks_tree = RB_ROOT;
  32. ssize_t pm_show_wakelocks(char *buf, bool show_active)
  33. {
  34. struct rb_node *node;
  35. struct wakelock *wl;
  36. int len = 0;
  37. mutex_lock(&wakelocks_lock);
  38. for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
  39. wl = rb_entry(node, struct wakelock, node);
  40. if (wl->ws->active == show_active)
  41. len += sysfs_emit_at(buf, len, "%s ", wl->name);
  42. }
  43. if (len > 0)
  44. --len;
  45. len += sysfs_emit_at(buf, len, "\n");
  46. mutex_unlock(&wakelocks_lock);
  47. return len;
  48. }
  49. #if CONFIG_PM_WAKELOCKS_LIMIT > 0
  50. static unsigned int number_of_wakelocks;
  51. static inline bool wakelocks_limit_exceeded(void)
  52. {
  53. return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
  54. }
  55. static inline void increment_wakelocks_number(void)
  56. {
  57. number_of_wakelocks++;
  58. }
  59. static inline void decrement_wakelocks_number(void)
  60. {
  61. number_of_wakelocks--;
  62. }
  63. #else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
  64. static inline bool wakelocks_limit_exceeded(void) { return false; }
  65. static inline void increment_wakelocks_number(void) {}
  66. static inline void decrement_wakelocks_number(void) {}
  67. #endif /* CONFIG_PM_WAKELOCKS_LIMIT */
#ifdef CONFIG_PM_WAKELOCKS_GC
/* Schedule a GC pass after this many pm_wake_unlock() calls. */
#define WL_GC_COUNT_MAX 100
/* A wakelock idle for at least this many seconds may be reclaimed. */
#define WL_GC_TIME_SEC 300

static void __wakelocks_gc(struct work_struct *work);

/* LRU list of all wakelocks; most recently used entries at the head. */
static LIST_HEAD(wakelocks_lru_list);
static DECLARE_WORK(wakelock_work, __wakelocks_gc);
/* Unlock operations since the last GC pass; protected by wakelocks_lock. */
static unsigned int wakelocks_gc_count;

static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}

static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}

/*
 * __wakelocks_gc - Reclaim inactive wakelocks idle for WL_GC_TIME_SEC.
 *
 * Work item body, run from the system workqueue.  Takes wakelocks_lock
 * for the whole pass; ws->lock is nested inside it only to sample each
 * wakeup source's state consistently.
 */
static void __wakelocks_gc(struct work_struct *work)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	mutex_lock(&wakelocks_lock);
	now = ktime_get();
	/* Walk from the least recently used end toward the head. */
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		spin_lock_irq(&wl->ws->lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));
		active = wl->ws->active;
		spin_unlock_irq(&wl->ws->lock);

		/*
		 * The list is LRU-ordered, so every remaining entry was
		 * used more recently than this one: nothing further can
		 * be stale, stop scanning.
		 */
		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		if (!active) {
			/* Stale and inactive: tear the wakelock down. */
			wakeup_source_unregister(wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	wakelocks_gc_count = 0;
	mutex_unlock(&wakelocks_lock);
}

/*
 * wakelocks_gc - Kick off a GC pass once enough unlocks have happened.
 *
 * Called under wakelocks_lock from pm_wake_unlock().
 */
static void wakelocks_gc(void)
{
	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
		return;

	schedule_work(&wakelock_work);
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */
/*
 * wakelock_lookup_add - Find a wakelock by name, optionally creating it.
 * @name: Name to look up (need not be NUL-terminated).
 * @len: Number of characters of @name forming the key.
 * @add_if_not_found: Whether to create a new entry on lookup failure.
 *
 * Caller must hold wakelocks_lock.  Returns the (possibly newly added)
 * wakelock, or an ERR_PTR(): -EINVAL if not found and not adding,
 * -ENOSPC when the configured limit is exceeded, -ENOMEM on allocation
 * or wakeup-source registration failure.
 */
static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			/*
			 * First @len characters match; it is the same lock
			 * only if the stored name ends exactly there, so a
			 * longer stored name sorts as "greater".
			 */
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, we have to add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	wl->ws = wakeup_source_register(NULL, wl->name);
	if (!wl->ws) {
		kfree(wl->name);
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	/* Start the idle clock now so GC does not reap a fresh entry. */
	wl->ws->last_time = ktime_get();

	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}
/*
 * pm_wake_lock - Acquire (or create) a user-space wakelock.
 * @buf: "<name>[ <timeout>]" as written to /sys/power/wake_lock; the
 *       optional timeout is a decimal number of nanoseconds.
 *
 * Activates the named wakeup source either indefinitely or, when a
 * timeout is supplied, for that duration.  Requires CAP_BLOCK_SUSPEND.
 * Returns 0 on success or a negative errno (-EPERM, -EINVAL, or an
 * error propagated from wakelock_lookup_add()).
 */
int pm_wake_lock(const char *buf)
{
	const char *str = buf;
	struct wakelock *wl;
	u64 timeout_ns = 0;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	/* The name extends to the first whitespace character. */
	while (*str && !isspace(*str))
		str++;

	len = str - buf;
	if (!len)
		return -EINVAL;

	if (*str && *str != '\n') {
		/* Find out if there's a valid timeout string appended. */
		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
		if (ret)
			return -EINVAL;
	}

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, true);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	if (timeout_ns) {
		/* Convert to milliseconds, rounding up. */
		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;

		do_div(timeout_ms, NSEC_PER_MSEC);
		__pm_wakeup_event(wl->ws, timeout_ms);
	} else {
		__pm_stay_awake(wl->ws);
	}

	wakelocks_lru_most_recent(wl);

out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}
  207. int pm_wake_unlock(const char *buf)
  208. {
  209. struct wakelock *wl;
  210. size_t len;
  211. int ret = 0;
  212. if (!capable(CAP_BLOCK_SUSPEND))
  213. return -EPERM;
  214. len = strlen(buf);
  215. if (!len)
  216. return -EINVAL;
  217. if (buf[len-1] == '\n')
  218. len--;
  219. if (!len)
  220. return -EINVAL;
  221. mutex_lock(&wakelocks_lock);
  222. wl = wakelock_lookup_add(buf, len, false);
  223. if (IS_ERR(wl)) {
  224. ret = PTR_ERR(wl);
  225. goto out;
  226. }
  227. __pm_relax(wl->ws);
  228. wakelocks_lru_most_recent(wl);
  229. wakelocks_gc();
  230. out:
  231. mutex_unlock(&wakelocks_lock);
  232. return ret;
  233. }