syncpt.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#ifndef __HOST1X_SYNCPT_H
#define __HOST1X_SYNCPT_H

#include <linux/atomic.h>
#include <linux/host1x.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/sched.h>

#include "fence.h"
#include "intr.h"

struct host1x;

/* Reserved for replacing an expired wait with a NOP */
#define HOST1X_SYNCPT_RESERVED 0

struct host1x_syncpt_base {
	unsigned int id;
	bool requested;
};

struct host1x_syncpt {
	struct kref ref;

	unsigned int id;
	atomic_t min_val;
	atomic_t max_val;
	u32 base_val;
	const char *name;
	bool client_managed;
	struct host1x *host;
	struct host1x_syncpt_base *base;

	/* interrupt data */
	struct host1x_fence_list fences;

	/*
	 * If a submission incrementing this syncpoint fails, lock it so that
	 * further submissions cannot be made until the application has
	 * handled the failure.
	 */
	bool locked;
};
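
/*
 * Rough model of the shadow tracking (a sketch based on the helpers below,
 * not a normative description): min_val shadows the sync point value last
 * read back from hardware, while max_val is advanced ahead of time via
 * host1x_syncpt_incr_max() for every increment a submission will perform.
 * For example, after host1x_syncpt_incr_max(sp, 3) max_val leads min_val by
 * three until the hardware has performed those increments, at which point
 * host1x_syncpt_idle() reports true again.
 */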

/* Initialize sync point array */
int host1x_syncpt_init(struct host1x *host);

/* Free sync point array */
void host1x_syncpt_deinit(struct host1x *host);

/* Return the number of sync points supported. */
unsigned int host1x_syncpt_nb_pts(struct host1x *host);

/* Return the number of wait bases supported. */
unsigned int host1x_syncpt_nb_bases(struct host1x *host);

/* Return the number of mlocks supported. */
unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);

/*
 * Check sync point sanity. If the real (hardware) value is larger than max,
 * too many sync point increments have been made.
 *
 * Client-managed sync points are not tracked.
 */
static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
{
	u32 max;

	if (sp->client_managed)
		return true;

	max = host1x_syncpt_read_max(sp);
	return (s32)(max - real) >= 0;
}
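
/*
 * The signed subtraction above keeps this check correct across 32-bit
 * wraparound. With hypothetical values max = 0x00000002 and
 * real = 0xfffffffe, (max - real) is 4, so (s32)(max - real) >= 0 and the
 * state still counts as sane even though max is numerically smaller.
 */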

/* Return true if sync point is client managed. */
static inline bool host1x_syncpt_client_managed(struct host1x_syncpt *sp)
{
	return sp->client_managed;
}

/*
 * Returns true if syncpoint min == max, which means that there are no
 * outstanding operations.
 */
static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
{
	int min, max;

	smp_rmb();

	min = atomic_read(&sp->min_val);
	max = atomic_read(&sp->max_val);

	return (min == max);
}

/* Load current value from hardware to the shadow register. */
u32 host1x_syncpt_load(struct host1x_syncpt *sp);

/* Check if the given syncpoint value has already passed */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);

/* Save host1x sync point state into shadow registers. */
void host1x_syncpt_save(struct host1x *host);

/* Reset host1x sync point state from shadow registers. */
void host1x_syncpt_restore(struct host1x *host);

/* Read current wait base value into shadow register and return it. */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);

/* Indicate future operations by incrementing the sync point max. */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
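
/*
 * Illustrative caller pattern (a sketch of assumed usage, with made-up
 * variable names): a submission reserves its completion value up front and
 * checks for it later, e.g.
 *
 *	u32 fence = host1x_syncpt_incr_max(sp, num_incrs);
 *	(queue num_incrs sync point increments to hardware)
 *	completed = host1x_syncpt_is_expired(sp, fence);
 */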

/* Check if sync point id is valid. */
static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
{
	return sp->id < host1x_syncpt_nb_pts(sp->host);
}

static inline void host1x_syncpt_set_locked(struct host1x_syncpt *sp)
{
	sp->locked = true;
}

#endif