  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * ACRN HSM eventfd - use eventfd objects to signal expected I/O requests
  4. *
  5. * Copyright (C) 2020 Intel Corporation. All rights reserved.
  6. *
  7. * Authors:
  8. * Shuo Liu <shuo.a.liu@intel.com>
  9. * Yakui Zhao <yakui.zhao@intel.com>
  10. */
  11. #include <linux/eventfd.h>
  12. #include <linux/slab.h>
  13. #include "acrn_drv.h"
/**
 * struct hsm_ioeventfd - Properties of HSM ioeventfd
 * @list:	Entry within &acrn_vm.ioeventfds of ioeventfds of a VM
 * @eventfd:	Eventfd of the HSM ioeventfd
 * @addr:	Address of I/O range
 * @data:	Data for matching
 * @length:	Length of I/O range
 * @type:	Type of I/O range (ACRN_IOREQ_TYPE_MMIO/ACRN_IOREQ_TYPE_PORTIO)
 * @wildcard:	Data matching or not (%true: any value written to @addr
 *		signals @eventfd and @data is ignored)
 */
struct hsm_ioeventfd {
	struct list_head list;
	struct eventfd_ctx *eventfd;
	u64 addr;
	u64 data;
	int length;
	int type;
	bool wildcard;
};
  33. static inline int ioreq_type_from_flags(int flags)
  34. {
  35. return flags & ACRN_IOEVENTFD_FLAG_PIO ?
  36. ACRN_IOREQ_TYPE_PORTIO : ACRN_IOREQ_TYPE_MMIO;
  37. }
  38. static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
  39. {
  40. lockdep_assert_held(&vm->ioeventfds_lock);
  41. eventfd_ctx_put(p->eventfd);
  42. list_del(&p->list);
  43. kfree(p);
  44. }
  45. static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
  46. struct hsm_ioeventfd *ioeventfd)
  47. {
  48. struct hsm_ioeventfd *p;
  49. lockdep_assert_held(&vm->ioeventfds_lock);
  50. /* Either one is wildcard, the data matching will be skipped. */
  51. list_for_each_entry(p, &vm->ioeventfds, list)
  52. if (p->eventfd == ioeventfd->eventfd &&
  53. p->addr == ioeventfd->addr &&
  54. p->type == ioeventfd->type &&
  55. (p->wildcard || ioeventfd->wildcard ||
  56. p->data == ioeventfd->data))
  57. return true;
  58. return false;
  59. }
  60. /*
  61. * Assign an eventfd to a VM and create a HSM ioeventfd associated with the
  62. * eventfd. The properties of the HSM ioeventfd are built from a &struct
  63. * acrn_ioeventfd.
  64. */
  65. static int acrn_ioeventfd_assign(struct acrn_vm *vm,
  66. struct acrn_ioeventfd *args)
  67. {
  68. struct eventfd_ctx *eventfd;
  69. struct hsm_ioeventfd *p;
  70. int ret;
  71. /* Check for range overflow */
  72. if (args->addr + args->len < args->addr)
  73. return -EINVAL;
  74. /*
  75. * Currently, acrn_ioeventfd is used to support vhost. 1,2,4,8 width
  76. * accesses can cover vhost's requirements.
  77. */
  78. if (!(args->len == 1 || args->len == 2 ||
  79. args->len == 4 || args->len == 8))
  80. return -EINVAL;
  81. eventfd = eventfd_ctx_fdget(args->fd);
  82. if (IS_ERR(eventfd))
  83. return PTR_ERR(eventfd);
  84. p = kzalloc(sizeof(*p), GFP_KERNEL);
  85. if (!p) {
  86. ret = -ENOMEM;
  87. goto fail;
  88. }
  89. INIT_LIST_HEAD(&p->list);
  90. p->addr = args->addr;
  91. p->length = args->len;
  92. p->eventfd = eventfd;
  93. p->type = ioreq_type_from_flags(args->flags);
  94. /*
  95. * ACRN_IOEVENTFD_FLAG_DATAMATCH flag is set in virtio 1.0 support, the
  96. * writing of notification register of each virtqueue may trigger the
  97. * notification. There is no data matching requirement.
  98. */
  99. if (args->flags & ACRN_IOEVENTFD_FLAG_DATAMATCH)
  100. p->data = args->data;
  101. else
  102. p->wildcard = true;
  103. mutex_lock(&vm->ioeventfds_lock);
  104. if (hsm_ioeventfd_is_conflict(vm, p)) {
  105. ret = -EEXIST;
  106. goto unlock_fail;
  107. }
  108. /* register the I/O range into ioreq client */
  109. ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
  110. p->addr, p->addr + p->length - 1);
  111. if (ret < 0)
  112. goto unlock_fail;
  113. list_add_tail(&p->list, &vm->ioeventfds);
  114. mutex_unlock(&vm->ioeventfds_lock);
  115. return 0;
  116. unlock_fail:
  117. mutex_unlock(&vm->ioeventfds_lock);
  118. kfree(p);
  119. fail:
  120. eventfd_ctx_put(eventfd);
  121. return ret;
  122. }
  123. static int acrn_ioeventfd_deassign(struct acrn_vm *vm,
  124. struct acrn_ioeventfd *args)
  125. {
  126. struct hsm_ioeventfd *p;
  127. struct eventfd_ctx *eventfd;
  128. eventfd = eventfd_ctx_fdget(args->fd);
  129. if (IS_ERR(eventfd))
  130. return PTR_ERR(eventfd);
  131. mutex_lock(&vm->ioeventfds_lock);
  132. list_for_each_entry(p, &vm->ioeventfds, list) {
  133. if (p->eventfd != eventfd)
  134. continue;
  135. acrn_ioreq_range_del(vm->ioeventfd_client, p->type,
  136. p->addr, p->addr + p->length - 1);
  137. acrn_ioeventfd_shutdown(vm, p);
  138. break;
  139. }
  140. mutex_unlock(&vm->ioeventfds_lock);
  141. eventfd_ctx_put(eventfd);
  142. return 0;
  143. }
  144. static struct hsm_ioeventfd *hsm_ioeventfd_match(struct acrn_vm *vm, u64 addr,
  145. u64 data, int len, int type)
  146. {
  147. struct hsm_ioeventfd *p = NULL;
  148. lockdep_assert_held(&vm->ioeventfds_lock);
  149. list_for_each_entry(p, &vm->ioeventfds, list) {
  150. if (p->type == type && p->addr == addr && p->length >= len &&
  151. (p->wildcard || p->data == data))
  152. return p;
  153. }
  154. return NULL;
  155. }
  156. static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
  157. struct acrn_io_request *req)
  158. {
  159. struct hsm_ioeventfd *p;
  160. u64 addr, val;
  161. int size;
  162. if (req->type == ACRN_IOREQ_TYPE_MMIO) {
  163. /*
  164. * I/O requests are dispatched by range check only, so a
  165. * acrn_ioreq_client need process both READ and WRITE accesses
  166. * of same range. READ accesses are safe to be ignored here
  167. * because virtio PCI devices write the notify registers for
  168. * notification.
  169. */
  170. if (req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) {
  171. /* reading does nothing and return 0 */
  172. req->reqs.mmio_request.value = 0;
  173. return 0;
  174. }
  175. addr = req->reqs.mmio_request.address;
  176. size = req->reqs.mmio_request.size;
  177. val = req->reqs.mmio_request.value;
  178. } else {
  179. if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ) {
  180. /* reading does nothing and return 0 */
  181. req->reqs.pio_request.value = 0;
  182. return 0;
  183. }
  184. addr = req->reqs.pio_request.address;
  185. size = req->reqs.pio_request.size;
  186. val = req->reqs.pio_request.value;
  187. }
  188. mutex_lock(&client->vm->ioeventfds_lock);
  189. p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
  190. if (p)
  191. eventfd_signal(p->eventfd);
  192. mutex_unlock(&client->vm->ioeventfds_lock);
  193. return 0;
  194. }
  195. int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args)
  196. {
  197. int ret;
  198. if (args->flags & ACRN_IOEVENTFD_FLAG_DEASSIGN)
  199. ret = acrn_ioeventfd_deassign(vm, args);
  200. else
  201. ret = acrn_ioeventfd_assign(vm, args);
  202. return ret;
  203. }
  204. int acrn_ioeventfd_init(struct acrn_vm *vm)
  205. {
  206. char name[ACRN_NAME_LEN];
  207. mutex_init(&vm->ioeventfds_lock);
  208. INIT_LIST_HEAD(&vm->ioeventfds);
  209. snprintf(name, sizeof(name), "ioeventfd-%u", vm->vmid);
  210. vm->ioeventfd_client = acrn_ioreq_client_create(vm,
  211. acrn_ioeventfd_handler,
  212. NULL, false, name);
  213. if (!vm->ioeventfd_client) {
  214. dev_err(acrn_dev.this_device, "Failed to create ioeventfd ioreq client!\n");
  215. return -EINVAL;
  216. }
  217. dev_dbg(acrn_dev.this_device, "VM %u ioeventfd init.\n", vm->vmid);
  218. return 0;
  219. }
  220. void acrn_ioeventfd_deinit(struct acrn_vm *vm)
  221. {
  222. struct hsm_ioeventfd *p, *next;
  223. dev_dbg(acrn_dev.this_device, "VM %u ioeventfd deinit.\n", vm->vmid);
  224. acrn_ioreq_client_destroy(vm->ioeventfd_client);
  225. mutex_lock(&vm->ioeventfds_lock);
  226. list_for_each_entry_safe(p, next, &vm->ioeventfds, list)
  227. acrn_ioeventfd_shutdown(vm, p);
  228. mutex_unlock(&vm->ioeventfds_lock);
  229. }