
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}

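/*
 * Called by the dma_fence core, with the fence lock held, when a waiter
 * actually needs the fence to signal. Hook the fence up to the syncpoint
 * interrupt and, when requested, arm the timeout fallback.
 */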
static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	/* Reference for interrupt path. */
	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We cannot currently always guarantee that all fences get signalled
	 * or cancelled. As such, for such situations, set up a timeout, so
	 * that long-lasting fences will get reaped eventually.
	 */
	if (sf->timeout) {
		/* Reference for timeout path. */
		dma_fence_get(f);
		schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
	}

	host1x_intr_add_fence_locked(sf->sp->host, sf);

	/*
	 * The fence may get signalled at any time after the above call,
	 * so we need to initialize all state used by signalling
	 * before it.
	 */

	return true;
}

static const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
};

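/*
 * Signal the fence from the syncpoint interrupt path. Must be called with
 * the fence's lock (sp->fences.lock) held, since it signals through
 * dma_fence_signal_locked(). The 'signaling' flag decides whether this
 * path or the timeout worker performs the final signal and reference drops.
 */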
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
	if (atomic_xchg(&f->signaling, 1)) {
		/*
		 * Already on timeout path, but we removed the fence before
		 * timeout path could, so drop interrupt path reference.
		 */
		dma_fence_put(&f->base);
		return;
	}

	if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
		/*
		 * We know that the timeout path will not be entered.
		 * Safe to drop the timeout path's reference now.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_signal_locked(&f->base);
	dma_fence_put(&f->base);
}

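/*
 * Timeout fallback: if the syncpoint interrupt has not signalled the fence
 * within the timeout, remove it from the interrupt queue and signal it with
 * -ETIMEDOUT so that waiters are not blocked forever.
 */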
static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1)) {
		/* Already on interrupt path, drop timeout path reference if any. */
		if (f->timeout)
			dma_fence_put(&f->base);
		return;
	}

	if (host1x_intr_remove_fence(f->sp->host, f)) {
		/*
		 * Managed to remove fence from queue, so it's safe to drop
		 * the interrupt path's reference.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);

	if (f->timeout)
		dma_fence_put(&f->base);
}

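/*
 * Create a dma_fence that signals when syncpoint @sp reaches @threshold.
 * If @timeout is true, the fence is force-signalled with -ETIMEDOUT 30
 * seconds after signalling has been enabled, so that it cannot remain
 * pending indefinitely if the syncpoint never reaches the threshold.
 */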
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout)
{
	struct host1x_syncpt_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->sp = sp;
	fence->threshold = threshold;
	fence->timeout = timeout;

	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
		       dma_fence_context_alloc(1), 0);

	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);

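/*
 * Cancel a fence by forcing its timeout path to run immediately and waiting
 * for it to complete; the fence ends up signalled with -ETIMEDOUT.
 */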
void host1x_fence_cancel(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	schedule_delayed_work(&sf->timeout_work, 0);
	flush_delayed_work(&sf->timeout_work);
}
EXPORT_SYMBOL(host1x_fence_cancel);