// SPDX-License-Identifier: MIT

#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>

#include "drm_internal.h"
/**
 * DOC: vblank works
 *
 * Many DRM drivers need to program hardware in a time-sensitive manner, often
 * with a deadline of starting and finishing within a certain region of the
 * scanout. Most of the time the safest way to accomplish this is to simply do
 * said time-sensitive programming in the driver's IRQ handler, which allows
 * drivers to avoid being preempted during these critical regions. Better yet,
 * the hardware may handle applying such time-critical programming
 * independently of the CPU.
 *
 * While a decent amount of hardware is designed so that the CPU doesn't need
 * to be concerned with extremely time-sensitive programming, there are a few
 * situations where it can't be helped. Some unforgiving hardware may require
 * that certain time-sensitive programming be handled completely by the CPU,
 * and said programming may even take too long to handle in an IRQ handler.
 * Another such situation would be where the driver needs to perform a task
 * that must complete within a specific scanout period, but might possibly
 * block and thus cannot be handled in an IRQ context. Neither of these
 * situations can be solved perfectly in Linux since we're not a realtime
 * kernel, and thus the scheduler may cause us to miss our deadline if it
 * decides to preempt us. But for some drivers, it's good enough if we can
 * lower our chance of being preempted to an absolute minimum.
 *
 * This is where &drm_vblank_work comes in. &drm_vblank_work provides a simple
 * generic delayed work implementation which delays work execution until a
 * particular vblank has passed, and then executes the work at realtime
 * priority. This provides the best possible chance at performing
 * time-sensitive hardware programming on time, even when the system is under
 * heavy load. &drm_vblank_work also supports rescheduling, so that
 * self-rearming work items can be easily implemented.
 */
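
/*
 * Example usage (a minimal sketch: the mydrv_* names below are hypothetical
 * and only illustrate the intended call flow, they are not part of this
 * API). A driver embeds a &drm_vblank_work in its CRTC state, initializes
 * it once at CRTC setup, and then schedules it against a target vblank
 * count:
 *
 *	struct mydrv_crtc {
 *		struct drm_crtc base;
 *		struct drm_vblank_work update_work;
 *	};
 *
 *	static void mydrv_update_work_fn(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work =
 *			container_of(base, struct drm_vblank_work, base);
 *		struct mydrv_crtc *mcrtc =
 *			container_of(work, struct mydrv_crtc, update_work);
 *
 *		// Runs on the CRTC's FIFO-priority kthread worker once the
 *		// target vblank has passed.
 *		mydrv_program_hw(mcrtc);
 *	}
 *
 *	static void mydrv_crtc_setup(struct mydrv_crtc *mcrtc)
 *	{
 *		drm_vblank_work_init(&mcrtc->update_work, &mcrtc->base,
 *				     mydrv_update_work_fn);
 *	}
 *
 *	static void mydrv_queue_update(struct mydrv_crtc *mcrtc)
 *	{
 *		// Run after the next vblank; on a miss, defer to the one
 *		// after it rather than running immediately.
 *		u64 target = drm_crtc_vblank_count(&mcrtc->base) + 1;
 *
 *		drm_vblank_work_schedule(&mcrtc->update_work, target, true);
 *	}
 */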
/* Called from the vblank interrupt, with dev->event_lock held: hand every
 * work item whose target vblank has passed over to the CRTC's kthread
 * worker, dropping the vblank reference taken when it was scheduled.
 */
void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;
	u64 count = atomic64_read(&vblank->count);
	bool wake = false;

	assert_spin_locked(&vblank->dev->event_lock);

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		if (!drm_vblank_passed(count, work->count))
			continue;

		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		kthread_queue_work(vblank->worker, &work->base);
		wake = true;
	}
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
}

/* Handle cancelling any pending vblank work items and drop respective vblank
 * references in response to vblank interrupts being disabled.
 */
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;

	assert_spin_locked(&vblank->dev->event_lock);

	drm_WARN_ONCE(vblank->dev, !list_empty(&vblank->pending_work),
		      "Cancelling pending vblank works!\n");

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
	}

	wake_up_all(&vblank->work_wait_queue);
}

/**
 * drm_vblank_work_schedule - schedule a vblank work
 * @work: vblank work to schedule
 * @count: target vblank count
 * @nextonmiss: defer until the next vblank if target vblank was missed
 *
 * Schedule @work for execution once the crtc vblank count reaches @count.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %false, the work starts to execute immediately.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %true, the work is deferred until the next vblank (as if @count had been
 * specified as crtc vblank count + 1).
 *
 * If @work is already scheduled, this function will reschedule said work
 * using the new @count. This can be used for self-rearming work items, as
 * in the sketch following this function.
 *
 * Returns:
 * %1 if @work was successfully (re)scheduled, %0 if it was either already
 * scheduled or cancelled, or a negative error code on failure.
 */
int drm_vblank_work_schedule(struct drm_vblank_work *work,
			     u64 count, bool nextonmiss)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	u64 cur_vbl;
	unsigned long irqflags;
	bool passed, inmodeset, rescheduling = false, wake = false;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	if (work->cancelling)
		goto out;

	spin_lock(&dev->vbl_lock);
	inmodeset = vblank->inmodeset;
	spin_unlock(&dev->vbl_lock);
	if (inmodeset)
		goto out;

	if (list_empty(&work->node)) {
		ret = drm_vblank_get(dev, vblank->pipe);
		if (ret < 0)
			goto out;
	} else if (work->count == count) {
		/* Already scheduled w/ same vbl count */
		goto out;
	} else {
		rescheduling = true;
	}

	work->count = count;
	cur_vbl = drm_vblank_count(dev, vblank->pipe);
	passed = drm_vblank_passed(cur_vbl, count);
	if (passed)
		drm_dbg_core(dev,
			     "crtc %d vblank %llu already passed (current %llu)\n",
			     vblank->pipe, count, cur_vbl);

	if (!nextonmiss && passed) {
		drm_vblank_put(dev, vblank->pipe);
		ret = kthread_queue_work(vblank->worker, &work->base);

		if (rescheduling) {
			list_del_init(&work->node);
			wake = true;
		}
	} else {
		if (!rescheduling)
			list_add_tail(&work->node, &vblank->pending_work);
		ret = true;
	}

out:
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);
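
/*
 * Self-rearming sketch (hypothetical mydrv_* names, as in the example near
 * the top of this file): a work function may simply reschedule its own
 * &drm_vblank_work for a later vblank. Rescheduling from the worker context
 * is safe, and a work item that is being cancelled will refuse to re-arm
 * (see the work->cancelling check above):
 *
 *	static void mydrv_sample_crc_fn(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work =
 *			container_of(base, struct drm_vblank_work, base);
 *		struct mydrv_crtc *mcrtc =
 *			container_of(work, struct mydrv_crtc, crc_work);
 *
 *		mydrv_read_crc(mcrtc);
 *
 *		// Re-arm for the next vblank, deferring again on a miss.
 *		drm_vblank_work_schedule(work,
 *					 drm_crtc_vblank_count(&mcrtc->base) + 1,
 *					 true);
 *	}
 */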

/**
 * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
 * finish executing
 * @work: vblank work to cancel
 *
 * Cancel an already scheduled vblank work and wait for its
 * execution to finish.
 *
 * On return, @work is guaranteed to no longer be scheduled or running, even
 * if it's self-rearming.
 *
 * Returns:
 * %True if the work was cancelled before it started to execute, %false
 * otherwise.
 */
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	bool ret = false;

	spin_lock_irq(&dev->event_lock);
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		ret = true;
	}

	work->cancelling++;
	spin_unlock_irq(&dev->event_lock);

	wake_up_all(&vblank->work_wait_queue);

	if (kthread_cancel_work_sync(&work->base))
		ret = true;

	spin_lock_irq(&dev->event_lock);
	work->cancelling--;
	spin_unlock_irq(&dev->event_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);
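
/*
 * Teardown sketch (hypothetical names): cancelling synchronously before
 * freeing the containing object guarantees the work function is neither
 * queued nor running afterwards, even if it re-arms itself:
 *
 *	static void mydrv_crtc_destroy(struct mydrv_crtc *mcrtc)
 *	{
 *		drm_vblank_work_cancel_sync(&mcrtc->update_work);
 *		// Now safe to free anything the work function touches.
 *		mydrv_crtc_free(mcrtc);
 *	}
 */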

/**
 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
 * executing
 * @work: vblank work to flush
 *
 * Wait until @work has finished executing once.
 */
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;

	spin_lock_irq(&dev->event_lock);
	wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
			    dev->event_lock);
	spin_unlock_irq(&dev->event_lock);

	kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);
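
/*
 * Flush sketch (hypothetical names): flushing is useful when a caller needs
 * the side effects of one scheduled execution before it can proceed:
 *
 *	drm_vblank_work_schedule(&mcrtc->update_work, target, true);
 *	...
 *	drm_vblank_work_flush(&mcrtc->update_work);
 *	// The update has executed at least once at this point.
 */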

/**
 * drm_vblank_work_flush_all - flush all currently pending vblank work on crtc.
 * @crtc: crtc whose pending vblank work to flush
 *
 * Wait until all currently queued vblank work on @crtc
 * has finished executing once.
 */
void drm_vblank_work_flush_all(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc)];

	spin_lock_irq(&dev->event_lock);
	wait_event_lock_irq(vblank->work_wait_queue,
			    list_empty(&vblank->pending_work),
			    dev->event_lock);
	spin_unlock_irq(&dev->event_lock);

	kthread_flush_worker(vblank->worker);
}
EXPORT_SYMBOL(drm_vblank_work_flush_all);
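
/*
 * Sketch (hypothetical usage): a driver can drain every outstanding vblank
 * work item on a CRTC before disabling it, e.g. from its atomic commit
 * tail:
 *
 *	drm_vblank_work_flush_all(crtc);
 *	mydrv_crtc_disable(crtc);
 */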

/**
 * drm_vblank_work_init - initialize a vblank work item
 * @work: vblank work item
 * @crtc: CRTC whose vblank will trigger the work execution
 * @func: work function to be executed
 *
 * Initialize a vblank work item for a specific crtc.
 */
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
			  void (*func)(struct kthread_work *work))
{
	kthread_init_work(&work->base, func);
	INIT_LIST_HEAD(&work->node);
	work->vblank = drm_crtc_vblank_crtc(crtc);
}
EXPORT_SYMBOL(drm_vblank_work_init);

/* Create the per-CRTC kthread worker that executes vblank work items, and
 * give it realtime (SCHED_FIFO) priority.
 */
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
{
	struct kthread_worker *worker;

	INIT_LIST_HEAD(&vblank->pending_work);
	init_waitqueue_head(&vblank->work_wait_queue);

	worker = kthread_create_worker(0, "card%d-crtc%d",
				       vblank->dev->primary->index,
				       vblank->pipe);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	vblank->worker = worker;

	sched_set_fifo(worker->task);
	return 0;
}