// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"

/* Per-interrupt state for one AFU interrupt belonging to a context */
struct afu_irq {
	int id;			/* context-local interrupt index */
	int hw_irq;		/* hardware interrupt number, from the link */
	unsigned int virq;	/* Linux virtual interrupt number */
	char *name;		/* name shown in /proc/interrupts */
	u64 trigger_page;	/* address of the page written to raise the irq */
	struct eventfd_ctx *ev_ctx; /* optional userspace notification */
};

static int irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
{
	return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT;
}

static u64 irq_id_to_offset(struct ocxl_context *ctx, int id)
{
	return ctx->afu->irq_base_offset + (id << PAGE_SHIFT);
}

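/*
 * Each AFU interrupt owns one page-sized slot in the context's mmap
 * space, starting at irq_base_offset; the two helpers above convert
 * between a context-local interrupt id and its per-page offset. A
 * hypothetical userspace consumer (ioctl name taken from the ocxl
 * uapi, the rest assumed for illustration) would reach the trigger
 * page roughly like this:
 *
 *	u64 irq_offset;
 *
 *	ioctl(fd, OCXL_IOCTL_IRQ_ALLOC, &irq_offset);
 *	void *trigger = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, irq_offset);
 *
 * The resulting address is typically handed to the AFU, which raises
 * the interrupt by writing to it.
 */
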
static irqreturn_t afu_irq_handler(int virq, void *data)
{
	struct afu_irq *irq = (struct afu_irq *) data;

	trace_ocxl_afu_irq_receive(virq);
	if (irq->ev_ctx)
		eventfd_signal(irq->ev_ctx, 1);
	return IRQ_HANDLED;
}

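/*
 * Note that afu_irq_handler() runs in hard interrupt context:
 * eventfd_signal() is callable from there (it takes the eventfd
 * waitqueue lock with interrupts disabled), so the userspace wakeup
 * happens directly in the handler, with no deferred work.
 */
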
static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq)
{
	int rc;

	/* Map the hardware interrupt in the default irq domain */
	irq->virq = irq_create_mapping(NULL, irq->hw_irq);
	if (!irq->virq) {
		pr_err("irq_create_mapping failed\n");
		return -ENOMEM;
	}
	pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq);

	irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq);
	if (!irq->name) {
		irq_dispose_mapping(irq->virq);
		return -ENOMEM;
	}

	rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq);
	if (rc) {
		kfree(irq->name);
		irq->name = NULL;
		irq_dispose_mapping(irq->virq);
		pr_err("request_irq failed: %d\n", rc);
		return rc;
	}
	return 0;
}

static void release_afu_irq(struct afu_irq *irq)
{
	free_irq(irq->virq, irq);
	irq_dispose_mapping(irq->virq);
	kfree(irq->name);
}

int ocxl_afu_irq_alloc(struct ocxl_context *ctx, u64 *irq_offset)
{
	struct afu_irq *irq;
	int rc;

	irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	/*
	 * We limit the number of afu irqs per context and per link to
	 * avoid a single process or user depleting the pool of IPIs
	 */
	mutex_lock(&ctx->irq_lock);
	irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT,
			GFP_KERNEL);
	if (irq->id < 0) {
		rc = -ENOSPC;
		goto err_unlock;
	}

	rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq,
				&irq->trigger_page);
	if (rc)
		goto err_idr;

	rc = setup_afu_irq(ctx, irq);
	if (rc)
		goto err_alloc;

	*irq_offset = irq_id_to_offset(ctx, irq->id);

	trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq,
				*irq_offset);
	mutex_unlock(&ctx->irq_lock);
	return 0;

err_alloc:
	ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
err_idr:
	idr_remove(&ctx->irq_idr, irq->id);
err_unlock:
	mutex_unlock(&ctx->irq_lock);
	kfree(irq);
	return rc;
}

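/*
 * The offset returned by ocxl_afu_irq_alloc() is the one userspace is
 * expected to pass to mmap() on the context's file descriptor; in the
 * upstream driver the fault handler in context.c inserts the trigger
 * page into the vma at that offset (mentioned here for orientation,
 * not something this file enforces).
 */
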
static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx)
{
	trace_ocxl_afu_irq_free(ctx->pasid, irq->id);

	/*
	 * Remove the userspace mapping of the trigger page before
	 * releasing the Linux irq and the hardware interrupt
	 */
	if (ctx->mapping)
		unmap_mapping_range(ctx->mapping,
				irq_id_to_offset(ctx, irq->id),
				1 << PAGE_SHIFT, 1);
	release_afu_irq(irq);
	if (irq->ev_ctx)
		eventfd_ctx_put(irq->ev_ctx);
	ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
	kfree(irq);
}

int ocxl_afu_irq_free(struct ocxl_context *ctx, u64 irq_offset)
{
	struct afu_irq *irq;
	int id = irq_offset_to_id(ctx, irq_offset);

	mutex_lock(&ctx->irq_lock);

	irq = idr_find(&ctx->irq_idr, id);
	if (!irq) {
		mutex_unlock(&ctx->irq_lock);
		return -EINVAL;
	}
	idr_remove(&ctx->irq_idr, irq->id);
	afu_irq_free(irq, ctx);
	mutex_unlock(&ctx->irq_lock);
	return 0;
}

void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
{
	struct afu_irq *irq;
	int id;

	mutex_lock(&ctx->irq_lock);
	idr_for_each_entry(&ctx->irq_idr, irq, id)
		afu_irq_free(irq, ctx);
	mutex_unlock(&ctx->irq_lock);
}

int ocxl_afu_irq_set_fd(struct ocxl_context *ctx, u64 irq_offset, int eventfd)
{
	struct afu_irq *irq;
	struct eventfd_ctx *ev_ctx;
	int rc = 0, id = irq_offset_to_id(ctx, irq_offset);

	mutex_lock(&ctx->irq_lock);
	irq = idr_find(&ctx->irq_idr, id);
	if (!irq) {
		rc = -EINVAL;
		goto unlock;
	}

	ev_ctx = eventfd_ctx_fdget(eventfd);
	if (IS_ERR(ev_ctx)) {
		rc = -EINVAL;
		goto unlock;
	}

	irq->ev_ctx = ev_ctx;
unlock:
	mutex_unlock(&ctx->irq_lock);
	return rc;
}

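/*
 * A minimal userspace sketch of pairing an eventfd with an AFU
 * interrupt, assuming the ocxl uapi (OCXL_IOCTL_IRQ_ALLOC,
 * OCXL_IOCTL_IRQ_SET_FD and struct ocxl_ioctl_irq_fd from
 * include/uapi/misc/ocxl.h), where fd is the opened context device:
 *
 *	u64 irq_offset, count;
 *	int efd = eventfd(0, 0);
 *
 *	ioctl(fd, OCXL_IOCTL_IRQ_ALLOC, &irq_offset);
 *	struct ocxl_ioctl_irq_fd args = {
 *		.irq_offset = irq_offset,
 *		.eventfd = efd,
 *	};
 *	ioctl(fd, OCXL_IOCTL_IRQ_SET_FD, &args);
 *	read(efd, &count, sizeof(count));	// blocks until the irq fires
 */
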
u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, u64 irq_offset)
{
	struct afu_irq *irq;
	int id = irq_offset_to_id(ctx, irq_offset);
	u64 addr = 0;

	mutex_lock(&ctx->irq_lock);
	irq = idr_find(&ctx->irq_idr, id);
	if (irq)
		addr = irq->trigger_page;
	mutex_unlock(&ctx->irq_lock);
	return addr;
}