drm_managed.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device, usually
 * when userspace has some open files and other handles to resources still open.
 *
 * Release actions can be added with drmm_add_action(), memory allocations can
 * be done directly with drmm_kmalloc() and the related functions. Everything
 * will be released on the final drm_dev_put() in reverse order of how the
 * release actions have been added and memory has been allocated since driver
 * loading started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all of these functions are safe to call
 * concurrently. It is recommended, however, to use managed resources only for
 * resources that change rarely, if ever, during the lifetime of the
 * &drm_device instance.
 */

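/*
 * A minimal usage sketch (the my_* structures and functions below are
 * hypothetical, not part of this file): driver state is allocated with
 * devm_drm_dev_alloc(), a scratch buffer is managed with drmm_kmalloc(), and
 * a hardware-quiesce step is queued with drmm_add_action_or_reset() so both
 * are torn down on the final drm_dev_put():
 *
 *	static void my_hw_quiesce(struct drm_device *drm, void *data)
 *	{
 *		struct my_regs __iomem *regs = data;
 *
 *		writel(0, &regs->irq_enable);
 *	}
 *
 *	static int my_driver_init(struct my_device *mydev)
 *	{
 *		struct drm_device *drm = &mydev->drm;
 *		int ret;
 *
 *		mydev->scratch = drmm_kmalloc(drm, SZ_4K, GFP_KERNEL);
 *		if (!mydev->scratch)
 *			return -ENOMEM;
 *
 *		ret = drmm_add_action_or_reset(drm, my_hw_quiesce, mydev->regs);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 *
 * The action and the allocation are released in reverse order of registration
 * when the last reference to the &drm_device is dropped.
 */
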
struct drmres_node {
	struct list_head entry;
	drmres_release_t release;
	const char *name;
	size_t size;
};

struct drmres {
	struct drmres_node node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct drmres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};

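/*
 * The flexible data[] array is the managed payload itself: drmm_kmalloc() and
 * friends hand out a pointer to data[], while release actions registered with
 * a non-NULL argument store that single void * in data[] so it can be passed
 * back to the release callback.
 */
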
static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres *alloc_dr(drmres_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void *) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);

/**
 * drmm_release_action - release a managed action from a &drm_device
 * @dev: DRM device
 * @action: function which would be called when @dev is released
 * @data: opaque pointer, passed to @action
 *
 * This function calls the @action previously added by drmm_add_action()
 * immediately.
 * The @action is removed from the list of cleanup actions for @dev,
 * which means that it won't be called in the final drm_dev_put().
 */
void drmm_release_action(struct drm_device *dev,
			 drmres_release_t action,
			 void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry_reverse(dr, &dev->managed.resources, node.entry) {
		if (dr->node.release == action) {
			if (!data || *(void **)&dr->data == data) {
				dr_match = dr;
				del_dr(dev, dr_match);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	action(dev, data);

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_release_action);

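/*
 * A short sketch of releasing an action early (my_hw_quiesce() and
 * mydev->regs are hypothetical, matching the example above): calling
 * drmm_release_action() runs the callback right away and drops it from the
 * cleanup list, so it is not run again on the final drm_dev_put():
 *
 *	drmm_release_action(&mydev->drm, my_hw_quiesce, mydev->regs);
 */
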
/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", gfp);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);

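/*
 * The zeroing and array variants (drmm_kzalloc(), drmm_kcalloc(),
 * drmm_kmalloc_array()) are thin inline wrappers around drmm_kmalloc() in the
 * drm_managed.h header. A typical call (struct my_state is a placeholder):
 *
 *	struct my_state *state = drmm_kzalloc(drm, sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 */
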
/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);

	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);

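/*
 * A minimal pairing sketch (buffer name and size are placeholders): memory
 * obtained from drmm_kmalloc() may be returned early with drmm_kfree()
 * instead of waiting for the final drm_dev_put():
 *
 *	void *tmp = drmm_kmalloc(drm, SZ_1K, GFP_KERNEL);
 *
 *	if (tmp) {
 *		... use tmp ...
 *		drmm_kfree(drm, tmp);
 *	}
 */
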
void __drmm_mutex_release(struct drm_device *dev, void *res)
{
	struct mutex *lock = res;

	mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);