// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"
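
/*
 * Module-wide state: a workqueue for deferred I/O and CRW processing,
 * kmem caches backing the regions that are copied to/from userspace,
 * and two s390 debug areas for message and trace events.
 */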
struct workqueue_struct *vfio_ccw_work_q;
struct kmem_cache *vfio_ccw_io_region;
struct kmem_cache *vfio_ccw_cmd_region;
struct kmem_cache *vfio_ccw_schib_region;
struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
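
/*
 * vfio_ccw_sch_quiesce() - quiesce and disable a subchannel
 *
 * Issues cancel/halt/clear until the subchannel is idle, waits (with a
 * timeout) for the completion interrupts, and finally disables the
 * subchannel. The caller is expected to hold sch->lock, which is
 * dropped and reacquired around the wait.
 */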
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        /*
         * Probably an impossible situation, after being called through
         * FSM callbacks. But in the event it does happen, log a warning
         * and return as if things were fine.
         */
        if (WARN_ON(!private))
                return 0;

        iretry = 255;
        do {
                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(&sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3*HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(&sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);

        return ret;
}
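
/*
 * vfio_ccw_sch_io_todo() - deferred (workqueue) half of I/O interrupt
 * handling: copy the IRB into the I/O region for userspace, free the
 * channel program once its final interrupt has arrived, and signal the
 * I/O eventfd so userspace knows there is something to look at.
 */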
void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;
        bool cp_is_finished = false;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
                        cp_free(&private->cp);
                        cp_is_finished = true;
                }
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        /*
         * Reset to IDLE only if processing of a channel program
         * has finished. Do not overwrite a possible processing
         * state if the interrupt was unsolicited, or if the final
         * interrupt was for HSCH or CSCH.
         */
        if (cp_is_finished)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger);
}
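
/*
 * vfio_ccw_crw_todo() - deferred half of CRW handling: poke the CRW
 * eventfd when queued CRWs are waiting to be read by userspace.
 */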
void vfio_ccw_crw_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;

        private = container_of(work, struct vfio_ccw_private, crw_work);

        if (!list_empty(&private->crw) && private->crw_trigger)
                eventfd_signal(private->crw_trigger);
}

/*
 * Css driver callbacks
 */
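
/*
 * vfio_ccw_sch_irq() - interrupt handler for the subchannel; accounts
 * the interrupt and forwards it to the FSM as VFIO_CCW_EVENT_INTERRUPT.
 */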
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

        /*
         * The subchannel should still be disabled at this point,
         * so an interrupt would be quite surprising. As with an
         * interrupt while the FSM is closed, let's attempt to
         * disable the subchannel again.
         */
        if (!private) {
                VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: unexpected interrupt\n",
                                   sch->schid.cssid, sch->schid.ssid,
                                   sch->schid.sch_no);

                cio_disable_subchannel(sch);
                return;
        }

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

static void vfio_ccw_free_parent(struct device *dev)
{
        struct vfio_ccw_parent *parent = container_of(dev, struct vfio_ccw_parent, dev);

        kfree(parent);
}
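
/*
 * vfio_ccw_sch_probe() - bind to an I/O subchannel
 *
 * QDIO subchannels are rejected. For everything else, allocate and
 * register the "parent" device, then register it with the mdev core,
 * exposing a single "io" mediated device type.
 */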
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_parent *parent;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
        if (!parent)
                return -ENOMEM;

        dev_set_name(&parent->dev, "parent");
        parent->dev.parent = &sch->dev;
        parent->dev.release = &vfio_ccw_free_parent;
        ret = device_register(&parent->dev);
        if (ret)
                goto out_free;

        dev_set_drvdata(&sch->dev, parent);

        parent->mdev_type.sysfs_name = "io";
        parent->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
        parent->mdev_types[0] = &parent->mdev_type;
        ret = mdev_register_parent(&parent->parent, &sch->dev,
                                   &vfio_ccw_mdev_driver,
                                   parent->mdev_types, 1);
        if (ret)
                goto out_unreg;

        VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;

out_unreg:
        device_del(&parent->dev);
out_free:
        put_device(&parent->dev);
        dev_set_drvdata(&sch->dev, NULL);
        return ret;
}
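
/*
 * vfio_ccw_sch_remove() - unwind probe: unregister the mdev parent and
 * the parent device, and clear the subchannel's driver data.
 */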
static void vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);

        mdev_unregister_parent(&parent->parent);

        device_unregister(&parent->dev);
        dev_set_drvdata(&sch->dev, NULL);

        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

        if (!private)
                return;

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(&sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        rc = 0;

        if (cio_update_schib(sch)) {
                if (private)
                        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
        }

out_unlock:
        spin_unlock_irqrestore(&sch->lock, flags);

        return rc;
}
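
/**
 * vfio_ccw_queue_crw - queue a channel report word for userspace
 * @private: the vfio_ccw_private for the mediated device
 * @rsc: reporting source code
 * @erc: error recovery code
 * @rsid: reporting source ID
 *
 * The CRW is allocated with GFP_ATOMIC since this may be called from a
 * context that cannot sleep; the eventfd notification itself is
 * deferred to the workqueue.
 */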
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
                               unsigned int rsc,
                               unsigned int erc,
                               unsigned int rsid)
{
        struct vfio_ccw_crw *crw;

        /*
         * If unable to allocate a CRW, just drop the event and
         * carry on. The guest will either see a later one or
         * learn when it issues its own store subchannel.
         */
        crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
        if (!crw)
                return;

        /*
         * Build the CRW based on the inputs given to us.
         */
        crw->crw.rsc = rsc;
        crw->crw.erc = erc;
        crw->crw.rsid = rsid;

        list_add_tail(&crw->next, &private->crw);
        queue_work(vfio_ccw_work_q, &private->crw_work);
}
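
/*
 * vfio_ccw_chp_event() - react to channel-path events for this
 * subchannel: adjust the operational and logical path masks for vary
 * on/off, cancel in-flight I/O when the last-used path is affected, and
 * queue a CRW so the guest learns about paths going offline or online.
 */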
static int vfio_ccw_chp_event(struct subchannel *sch,
                              struct chp_link *link, int event)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
        int mask = chp_ssd_get_mask(&sch->ssd_info, link);
        int retry = 255;

        if (!private || !mask)
                return 0;

        trace_vfio_ccw_chp_event(sch->schid, mask, event);
        VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
                           sch->schid.cssid,
                           sch->schid.ssid, sch->schid.sch_no,
                           mask, event);

        if (cio_update_schib(sch))
                return -ENODEV;

        switch (event) {
        case CHP_VARY_OFF:
                /* Path logically turned off */
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                break;
        case CHP_OFFLINE:
                /* Path is gone */
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
                                   link->chpid.id);
                break;
        case CHP_VARY_ON:
                /* Path logically turned on */
                sch->opm |= mask;
                sch->lpm |= mask;
                break;
        case CHP_ONLINE:
                /* Path became available */
                sch->lpm |= mask & sch->opm;
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
                                   link->chpid.id);
                break;
        }

        return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
        .chp_event = vfio_ccw_chp_event,
};
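
/*
 * vfio_ccw_debug_init() - set up the two s390 debug feature areas: a
 * sprintf view for message events and a hex/ascii view for trace data.
 * The shared error path is safe because debug_unregister() ignores a
 * NULL id.
 */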
static int __init vfio_ccw_debug_init(void)
{
        vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
                                               11 * sizeof(long));
        if (!vfio_ccw_debug_msg_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
        debug_set_level(vfio_ccw_debug_msg_id, 2);
        vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
        if (!vfio_ccw_debug_trace_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(vfio_ccw_debug_trace_id, 2);
        return 0;

out_unregister:
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
        return -1;
}

static void vfio_ccw_debug_exit(void)
{
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
        kmem_cache_destroy(vfio_ccw_crw_region);
        kmem_cache_destroy(vfio_ccw_schib_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
}
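
/*
 * vfio_ccw_sch_init() - module init, in order: debug areas, the
 * single-threaded workqueue, the usercopy-whitelisted caches for the
 * four regions (they are copied wholesale to/from userspace), the mdev
 * driver, the ISC, and finally the css driver. The error path unwinds
 * in reverse; kmem_cache_destroy() tolerates NULL, so partially created
 * caches are fine to tear down.
 */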
static int __init vfio_ccw_sch_init(void)
{
        int ret;

        ret = vfio_ccw_debug_init();
        if (ret)
                return ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
                                        sizeof(struct ccw_io_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                        sizeof(struct ccw_cmd_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
                                        sizeof(struct ccw_schib_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_schib_region), NULL);
        if (!vfio_ccw_schib_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
                                        sizeof(struct ccw_crw_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_crw_region), NULL);
        if (!vfio_ccw_crw_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        ret = mdev_register_driver(&vfio_ccw_mdev_driver);
        if (ret)
                goto out_regions;

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_driver;
        }

        return ret;

out_driver:
        mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        mdev_unregister_driver(&vfio_ccw_mdev_driver);
        isc_unregister(VFIO_CCW_ISC);
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
}

module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_DESCRIPTION("VFIO based Subchannel device driver");
MODULE_LICENSE("GPL v2");