xen_drm_front_evtchnl.c

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>

#include <linux/errno.h>
#include <linux/irq.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"
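
/*
 * Interrupt handler for the control channel: consume responses posted by the
 * backend on the shared request ring and complete the matching request.
 */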
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_resp *resp;
	RING_IDX i, rp;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

again:
	rp = evtchnl->u.req.ring.sring->rsp_prod;
	/* ensure we see queued responses up to rp */
	virt_rmb();

	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
		if (unlikely(resp->id != evtchnl->evt_id))
			continue;

		switch (resp->operation) {
		case XENDISPL_OP_PG_FLIP:
		case XENDISPL_OP_FB_ATTACH:
		case XENDISPL_OP_FB_DETACH:
		case XENDISPL_OP_DBUF_CREATE:
		case XENDISPL_OP_DBUF_DESTROY:
		case XENDISPL_OP_SET_CONFIG:
			evtchnl->u.req.resp_status = resp->status;
			complete(&evtchnl->u.req.completion);
			break;
		default:
			DRM_ERROR("Operation %d is not supported\n",
				  resp->operation);
			break;
		}
	}

	evtchnl->u.req.ring.rsp_cons = i;

	if (i != evtchnl->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		evtchnl->u.req.ring.sring->rsp_event = i + 1;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);

	return IRQ_HANDLED;
}
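
/*
 * Interrupt handler for the in-event channel: dispatch events posted by the
 * backend on the shared event page (currently only page-flip completions).
 */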
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_event_page *page = evtchnl->u.evt.page;
	u32 cons, prod;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

	prod = page->in_prod;
	/* ensure we see ring contents up to prod */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xendispl_evt *event;

		event = &XENDISPL_IN_RING_REF(page, cons);
		if (unlikely(event->id != evtchnl->evt_id++))
			continue;

		switch (event->type) {
		case XENDISPL_EVT_PG_FLIP:
			xen_drm_front_on_frame_done(front_info, evtchnl->index,
						    event->op.pg_flip.fb_cookie);
			break;
		}
	}

	page->in_cons = cons;
	/* ensure ring contents */
	virt_wmb();

out:
	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}
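
/*
 * Tear down a single event channel: wake up any waiters with -EIO, unbind
 * the IRQ, free the event channel and end access to the granted page.
 */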
static void evtchnl_free(struct xen_drm_front_info *front_info,
			 struct xen_drm_front_evtchnl *evtchnl)
{
	unsigned long page = 0;

	if (evtchnl->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)evtchnl->u.req.ring.sring;
	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)evtchnl->u.evt.page;
	if (!page)
		return;

	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
		/* release everyone still waiting for a response, if any */
		evtchnl->u.req.resp_status = -EIO;
		complete_all(&evtchnl->u.req.completion);
	}

	if (evtchnl->irq)
		unbind_from_irqhandler(evtchnl->irq, evtchnl);

	if (evtchnl->port)
		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

	/* end access and free the page */
	if (evtchnl->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(evtchnl->gref, 0, page);

	memset(evtchnl, 0, sizeof(*evtchnl));
}
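
/*
 * Allocate one event channel: a zeroed shared page (request ring or event
 * page, depending on the channel type) granted to the backend, an event
 * channel port and an IRQ bound to the matching interrupt handler.
 */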
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
			 struct xen_drm_front_evtchnl *evtchnl,
			 enum xen_drm_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	int ret;

	memset(evtchnl, 0, sizeof(*evtchnl));
	evtchnl->type = type;
	evtchnl->index = index;
	evtchnl->front_info = front_info;
	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
	evtchnl->gref = GRANT_INVALID_REF;

	page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_displif_sring *sring;

		init_completion(&evtchnl->u.req.completion);
		mutex_init(&evtchnl->u.req.req_io_lock);
		sring = (struct xen_displif_sring *)page;
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			evtchnl->u.req.ring.sring = NULL;
			free_page(page);
			goto fail;
		}

		handler = evtchnl_interrupt_ctrl;
	} else {
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0) {
			free_page(page);
			goto fail;
		}

		evtchnl->u.evt.page = (struct xendispl_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}
	evtchnl->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irqhandler(evtchnl->port,
					handler, 0, xb_dev->devicetype,
					evtchnl);
	if (ret < 0)
		goto fail;

	evtchnl->irq = ret;
	return 0;

fail:
	DRM_ERROR("Failed to allocate ring: %d\n", ret);
	return ret;
}
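
/* Allocate a request/event channel pair for every configured connector. */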
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg;
	int ret, conn;

	cfg = &front_info->cfg;

	front_info->evt_pairs =
			kcalloc(cfg->num_connectors,
				sizeof(struct xen_drm_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs) {
		ret = -ENOMEM;
		goto fail;
	}

	for (conn = 0; conn < cfg->num_connectors; conn++) {
		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].req,
				    EVTCHNL_TYPE_REQ);
		if (ret < 0) {
			DRM_ERROR("Error allocating control channel\n");
			goto fail;
		}

		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].evt,
				    EVTCHNL_TYPE_EVT);
		if (ret < 0) {
			DRM_ERROR("Error allocating in-event channel\n");
			goto fail;
		}
	}

	front_info->num_evt_pairs = cfg->num_connectors;
	return 0;

fail:
	xen_drm_front_evtchnl_free_all(front_info);
	return ret;
}
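
/*
 * Write the ring reference and event channel port of one channel to the
 * given XenStore path as part of an open transaction.
 */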
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_drm_front_evtchnl *evtchnl,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
	int ret;

	/* write the grant reference of the shared ring/page */
	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
		return ret;
	}

	/* write the event channel port */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing event channel");
		return ret;
	}

	return 0;
}
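
/*
 * Publish ring references and event channel ports for all connectors in a
 * single XenStore transaction, retrying if the transaction ends with -EAGAIN.
 */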
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
	struct xenbus_transaction xbt;
	struct xen_drm_front_cfg *plat_data;
	int ret, conn;

	plat_data = &front_info->cfg;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (conn = 0; conn < plat_data->num_connectors; conn++) {
		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_REQ_RING_REF,
				      XENDISPL_FIELD_REQ_CHANNEL);
		if (ret < 0)
			goto fail;

		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_EVT_RING_REF,
				      XENDISPL_FIELD_EVT_CHANNEL);
		if (ret < 0)
			goto fail;
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
	return ret;
}
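
/* Push queued requests to the backend and notify it if needed. */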
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
	int notify;

	evtchnl->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(evtchnl->irq);
}
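
/* Set the state of all request/event channels under the I/O lock. */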
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
				     enum xen_drm_front_evtchnl_state state)
{
	unsigned long flags;
	int i;

	if (!front_info->evt_pairs)
		return;

	spin_lock_irqsave(&front_info->io_lock, flags);
	for (i = 0; i < front_info->num_evt_pairs; i++) {
		front_info->evt_pairs[i].req.state = state;
		front_info->evt_pairs[i].evt.state = state;
	}
	spin_unlock_irqrestore(&front_info->io_lock, flags);
}
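
/* Free all event channel pairs and the array that holds them. */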
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}