dma-fence-unwrap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
        cursor->array = dma_fence_chain_contained(cursor->chain);
        cursor->index = 0;
        return dma_fence_array_first(cursor->array);
}

/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
 * first fence.
 */
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
                                         struct dma_fence_unwrap *cursor)
{
        cursor->chain = dma_fence_get(head);
        return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);

/**
 * dma_fence_unwrap_next - return the next fence from the fence containers
 * @cursor: current position inside the containers
 *
 * Continues unwrapping the dma_fence_chain/dma_fence_array containers and
 * returns the next fence from them.
 */
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
        struct dma_fence *tmp;

        ++cursor->index;
        tmp = dma_fence_array_next(cursor->array, cursor->index);
        if (tmp)
                return tmp;

        cursor->chain = dma_fence_chain_walk(cursor->chain);
        return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
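
/*
 * Callers normally use these two functions through the
 * dma_fence_unwrap_for_each() iterator macro from <linux/dma-fence-unwrap.h>,
 * as the merge implementation below does. A minimal, purely illustrative
 * sketch, given a struct dma_fence *fence that might be a chain or array:
 *
 *      struct dma_fence_unwrap cursor;
 *      struct dma_fence *f;
 *
 *      dma_fence_unwrap_for_each(f, &cursor, fence)
 *              pr_info("ctx %llu seqno %llu\n", f->context, f->seqno);
 */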

static int fence_cmp(const void *_a, const void *_b)
{
        struct dma_fence *a = *(struct dma_fence **)_a;
        struct dma_fence *b = *(struct dma_fence **)_b;

        if (a->context < b->context)
                return -1;
        else if (a->context > b->context)
                return 1;

        if (dma_fence_is_later(b, a))
                return 1;
        else if (dma_fence_is_later(a, b))
                return -1;

        return 0;
}

/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
                                           struct dma_fence **fences,
                                           struct dma_fence_unwrap *iter)
{
        struct dma_fence_array *result;
        struct dma_fence *tmp, **array;
        ktime_t timestamp;
        int i, j, count;
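
        /*
         * First pass: count the fences which are not yet signaled and track
         * the latest timestamp of those which already are.
         */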
        count = 0;
        timestamp = ns_to_ktime(0);
        for (i = 0; i < num_fences; ++i) {
                dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
                        if (!dma_fence_is_signaled(tmp)) {
                                ++count;
                        } else {
                                ktime_t t = dma_fence_timestamp(tmp);

                                if (ktime_after(t, timestamp))
                                        timestamp = t;
                        }
                }
        }

        /*
         * If we couldn't find a pending fence just return a private signaled
         * fence with the timestamp of the last signaled one.
         */
        if (count == 0)
                return dma_fence_allocate_private_stub(timestamp);

        array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
        if (!array)
                return NULL;
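
        /*
         * Second pass: take a reference on each fence which is still pending
         * and collect it in the array.
         */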
        count = 0;
        for (i = 0; i < num_fences; ++i) {
                dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
                        if (!dma_fence_is_signaled(tmp)) {
                                array[count++] = dma_fence_get(tmp);
                        } else {
                                ktime_t t = dma_fence_timestamp(tmp);

                                if (ktime_after(t, timestamp))
                                        timestamp = t;
                        }
                }
        }

        if (count == 0 || count == 1)
                goto return_fastpath;
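
        /*
         * fence_cmp() orders the fences by context and, within one context,
         * newest first, so the loop below can simply keep the first entry it
         * sees for each context.
         */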
        sort(array, count, sizeof(*array), fence_cmp, NULL);

        /*
         * Only keep the most recent fence for each context.
         */
        j = 0;
        for (i = 1; i < count; i++) {
                if (array[i]->context == array[j]->context)
                        dma_fence_put(array[i]);
                else
                        array[++j] = array[i];
        }
        count = ++j;
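
        /*
         * On success dma_fence_array_create() takes ownership of the array
         * and the fence references, so the array is only freed on the error
         * and fastpath exits below.
         */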
        if (count > 1) {
                result = dma_fence_array_create(count, array,
                                                dma_fence_context_alloc(1),
                                                1, false);
                if (!result) {
                        for (i = 0; i < count; i++)
                                dma_fence_put(array[i]);
                        tmp = NULL;
                        goto return_tmp;
                }
                return &result->base;
        }
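
/*
 * With zero or one pending fence left there is no need for an array; return
 * the remaining fence (or a signaled stub) directly.
 */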
return_fastpath:
        if (count == 0)
                tmp = dma_fence_allocate_private_stub(timestamp);
        else
                tmp = array[0];

return_tmp:
        kfree(array);
        return tmp;
}
EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);