vfio_ccw_fsm.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "ioasm.h"
#include "vfio_ccw_private.h"

#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"
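
/*
 * fsm_io_helper() issues a "Start Subchannel" (ssch) for the channel
 * program that has been translated into private->cp and maps the
 * resulting condition code to an errno for the caller.
 */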
static int fsm_io_helper(struct vfio_ccw_private *private)
{
        struct subchannel *sch;
        union orb *orb;
        int ccode;
        __u8 lpm;
        unsigned long flags;
        int ret;

        sch = private->sch;

        spin_lock_irqsave(sch->lock, flags);
        private->state = VFIO_CCW_STATE_BUSY;

        orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);

        /* Issue "Start Subchannel" */
        ccode = ssch(sch->schid, orb);

        switch (ccode) {
        case 0:
                /*
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
                ret = 0;
                break;
        case 1:         /* Status pending */
        case 2:         /* Busy */
                ret = -EBUSY;
                break;
        case 3:         /* Device/path not operational */
        {
                lpm = orb->cmd.lpm;
                if (lpm != 0)
                        sch->lpm &= ~lpm;
                else
                        sch->lpm = 0;

                if (cio_update_schib(sch))
                        ret = -ENODEV;
                else
                        ret = sch->lpm ? -EACCES : -ENODEV;
                break;
        }
        default:
                ret = ccode;
        }
        spin_unlock_irqrestore(sch->lock, flags);
        return ret;
}
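
/*
 * Deal with the subchannel becoming not operational: schedule its
 * unregistration and move the FSM into the NOT_OPER state.
 */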
static void fsm_notoper(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
{
        struct subchannel *sch = private->sch;

        /*
         * TODO:
         * Probably we should send the machine check to the guest.
         */
        css_sched_sch_todo(sch, SCH_TODO_UNREG);
        private->state = VFIO_CCW_STATE_NOT_OPER;
}

/*
 * No operation action.
 */
static void fsm_nop(struct vfio_ccw_private *private,
                    enum vfio_ccw_event event)
{
}
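
/*
 * An I/O request arrived in a state that cannot handle it: log the
 * offending state and report -EIO back through the I/O region.
 */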
static void fsm_io_error(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
{
        pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
        private->io_region->ret_code = -EIO;
}
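
/*
 * An I/O request arrived while the FSM is in the BUSY or BOXED state:
 * report -EBUSY through the I/O region.
 */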
static void fsm_io_busy(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
{
        private->io_region->ret_code = -EBUSY;
}

static void fsm_disabled_irq(struct vfio_ccw_private *private,
                             enum vfio_ccw_event event)
{
        struct subchannel *sch = private->sch;

        /*
         * An interrupt in a disabled state means a previous disable was not
         * successful - should not happen, but we try to disable again.
         */
        cio_disable_subchannel(sch);
}
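
/* Return the subchannel ID backing this vfio-ccw device. */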
inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
        return p->sch->schid;
}

/*
 * Deal with the ccw command request from userspace.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
                           enum vfio_ccw_event event)
{
        union orb *orb;
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = private->io_region;
        struct mdev_device *mdev = private->mdev;
        char *errstr = "request";

        private->state = VFIO_CCW_STATE_BOXED;

        memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

        if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
                orb = (union orb *)io_region->orb_area;

                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
                        errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
                if (io_region->ret_code) {
                        errstr = "cp init";
                        goto err_out;
                }

                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
                        errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }

                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
                        errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
                return;
        } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
                /* XXX: Handle halt. */
                io_region->ret_code = -EOPNOTSUPP;
                goto err_out;
        } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
                /* XXX: Handle clear. */
                io_region->ret_code = -EOPNOTSUPP;
                goto err_out;
        }
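
        /*
         * Common error/exit path: drop back to the IDLE state and record the
         * requested function, return code and error string via the
         * vfio_ccw_io_fctl tracepoint.
         */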
err_out:
        private->state = VFIO_CCW_STATE_IDLE;
        trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
                               io_region->ret_code, errstr);
}

/*
 * Got an interrupt for a normal I/O (state busy).
 */
static void fsm_irq(struct vfio_ccw_private *private,
                    enum vfio_ccw_event event)
{
        struct irb *irb = this_cpu_ptr(&cio_irb);

        memcpy(&private->irb, irb, sizeof(*irb));

        queue_work(vfio_ccw_work_q, &private->io_work);

        if (private->completion)
                complete(private->completion);
}

/*
 * Device state machine
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
        [VFIO_CCW_STATE_NOT_OPER] = {
                [VFIO_CCW_EVENT_NOT_OPER]  = fsm_nop,
                [VFIO_CCW_EVENT_IO_REQ]    = fsm_io_error,
                [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
        },
        [VFIO_CCW_STATE_STANDBY] = {
                [VFIO_CCW_EVENT_NOT_OPER]  = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]    = fsm_io_error,
                [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
        },
        [VFIO_CCW_STATE_IDLE] = {
                [VFIO_CCW_EVENT_NOT_OPER]  = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]    = fsm_io_request,
                [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
        },
        [VFIO_CCW_STATE_BOXED] = {
                [VFIO_CCW_EVENT_NOT_OPER]  = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]    = fsm_io_busy,
                [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
        },
        [VFIO_CCW_STATE_BUSY] = {
                [VFIO_CCW_EVENT_NOT_OPER]  = fsm_notoper,
                [VFIO_CCW_EVENT_IO_REQ]    = fsm_io_busy,
                [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
        },
};
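
/*
 * For reference, a minimal dispatch sketch: the table above is indexed by the
 * current state and the incoming event. Assuming the inline helper in
 * vfio_ccw_private.h has roughly this shape (an assumption for illustration,
 * not a quote of that header), raising an event looks like:
 *
 *      static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
 *                                            int event)
 *      {
 *              vfio_ccw_jumptable[private->state][event](private, event);
 *      }
 *
 * so e.g. an I/O request from userspace in the IDLE state ends up in
 * fsm_io_request(), while the same request in the BUSY state is answered by
 * fsm_io_busy() with -EBUSY.
 */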