/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>
/* Walk the list backwards from @entry (exclusive) and unlock every buffer
 * reserved before it, undoing a partially completed reservation pass.
 */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
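
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical error path. After a successful ttm_eu_reserve_buffers() call,
 * any later failure is unwound with ttm_eu_backoff_reservation(), which
 * drops every lock and releases the ticket. driver_validate_bo() is a
 * hypothetical stand-in for driver-specific validation.
 */
#if 0	/* example only */
static int example_validate(struct ww_acquire_ctx *ticket,
			    struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, list, true, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, list, head) {
		ret = driver_validate_bo(entry->bo);	/* hypothetical */
		if (ret) {
			/* Drops all locks and releases the ticket. */
			ttm_eu_backoff_reservation(ticket, list);
			return ret;
		}
	}
	return 0;
}
#endif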
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
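
/*
 * Usage sketch (illustrative only, not part of the original file):
 * reserving with a duplicates list. When two entries share the same
 * reservation object, the second lock attempt returns -EALREADY; with a
 * non-NULL @dups list such entries are moved there instead of failing,
 * and their reservation object stays locked via the primary entry.
 */
#if 0	/* example only */
static int example_reserve(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	LIST_HEAD(dups);
	int ret;

	/* intr == true: waits are interruptible (may fail -ERESTARTSYS) */
	ret = ttm_eu_reserve_buffers(ticket, list, true, &dups);
	if (ret)
		return ret;	/* all reservations were already dropped */

	/* @list is fully reserved; entries on @dups need no extra unlock */
	return 0;
}
#endif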
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
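
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * happy path of an execbuf-style submission. driver_submit() and the
 * fence it returns are hypothetical driver code; everything else uses
 * the helpers above.
 */
#if 0	/* example only */
static int example_submit(struct ww_acquire_ctx *ticket,
			  struct list_head *list)
{
	struct dma_fence *fence;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, list, true, NULL);
	if (ret)
		return ret;

	fence = driver_submit(list);	/* hypothetical */
	if (IS_ERR(fence)) {
		ttm_eu_backoff_reservation(ticket, list);
		return PTR_ERR(fence);
	}

	/*
	 * Adds @fence to every reservation object (READ usage when
	 * entry->num_shared is set, WRITE otherwise), then drops all
	 * locks and the ticket.
	 */
	ttm_eu_fence_buffer_objects(ticket, list, fence);
	dma_fence_put(fence);
	return 0;
}
#endif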