  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Driver for s390 eadm subchannels
  4. *
  5. * Copyright IBM Corp. 2012
  6. * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
  7. */
  8. #include <linux/kernel_stat.h>
  9. #include <linux/completion.h>
  10. #include <linux/workqueue.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/device.h>
  13. #include <linux/module.h>
  14. #include <linux/timer.h>
  15. #include <linux/slab.h>
  16. #include <linux/list.h>
  17. #include <linux/io.h>
  18. #include <asm/css_chars.h>
  19. #include <asm/debug.h>
  20. #include <asm/isc.h>
  21. #include <asm/cio.h>
  22. #include <asm/scsw.h>
  23. #include <asm/eadm.h>
  24. #include "eadm_sch.h"
  25. #include "ioasm.h"
  26. #include "cio.h"
  27. #include "css.h"
  28. #include "orb.h"
MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");

/* Maximum time a started eadm request may run before it is cleared. */
#define EADM_TIMEOUT (7 * HZ)

/* list_lock protects eadm_list, the list of all known eadm subchannels. */
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);

/* s390 debug feature log used by the EADM_LOG* helpers below. */
static debug_info_t *eadm_debug;

/* Log a short text message at importance level @imp. */
#define EADM_LOG(imp, txt) do { \
	debug_text_event(eadm_debug, imp, txt); \
} while (0)
/* Log @length bytes of binary data at debug level @level. */
static void EADM_LOG_HEX(int level, void *data, int length)
{
	debug_event(eadm_debug, level, data, length);
}
/*
 * Initialize an operation request block for an eadm start.
 *
 * The whole ORB is zeroed first, then the flags needed for eadm
 * operation are set (compat1/compat2/fmt presumably select the eadm
 * ORB format, x enables eadm-mode — see the z architecture docs to
 * confirm the exact bit semantics).
 */
static void orb_init(union orb *orb)
{
	memset(orb, 0, sizeof(union orb));
	orb->eadm.compat1 = 1;
	orb->eadm.compat2 = 1;
	orb->eadm.fmt = 1;
	orb->eadm.x = 1;
}
/*
 * Issue a start subchannel (ssch) for @aob on @sch.
 *
 * The per-subchannel ORB is (re)initialized, pointed at the AOB and
 * tagged with the subchannel address as interruption parameter, so the
 * interrupt handler can find the subchannel again.
 *
 * Returns 0 if the start was accepted (start pending is reflected in
 * the local SCSW copy), -EBUSY for cc 1/2 and -ENODEV for cc 3.
 * NOTE(review): caller appears to hold sch->lock — confirm against the
 * css layer's calling conventions.
 */
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
	union orb *orb = &get_eadm_private(sch)->orb;
	int cc;

	orb_init(orb);
	orb->eadm.aob = virt_to_dma32(aob);
	orb->eadm.intparm = (u32)virt_to_phys(sch);
	orb->eadm.key = PAGE_DEFAULT_KEY >> 4;

	EADM_LOG(6, "start");
	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		break;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* not operational */
		return -ENODEV;
	}
	return 0;
}
  73. static int eadm_subchannel_clear(struct subchannel *sch)
  74. {
  75. int cc;
  76. cc = csch(sch->schid);
  77. if (cc)
  78. return -ENODEV;
  79. sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
  80. return 0;
  81. }
/*
 * Timer callback: a started eadm request did not complete within
 * EADM_TIMEOUT.  Take the subchannel lock and try to clear the
 * subchannel; the resulting interrupt is then handled by
 * eadm_subchannel_irq() (which maps a pending clear to BLK_STS_TIMEOUT).
 */
static void eadm_subchannel_timeout(struct timer_list *t)
{
	struct eadm_private *private = from_timer(private, t, timer);
	struct subchannel *sch = private->sch;

	spin_lock_irq(&sch->lock);
	EADM_LOG(1, "timeout");
	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
	if (eadm_subchannel_clear(sch))
		EADM_LOG(0, "clear failed");
	spin_unlock_irq(&sch->lock);
}
  93. static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
  94. {
  95. struct eadm_private *private = get_eadm_private(sch);
  96. if (expires == 0)
  97. del_timer(&private->timer);
  98. else
  99. mod_timer(&private->timer, jiffies + expires);
  100. }
/*
 * Interrupt handler for eadm subchannels.
 *
 * Derives a block-layer status from the SCSW: alert/pending status with
 * valid extended status (eswf) and the erw.r bit set means I/O error;
 * a pending clear function (set by the timeout path) means timeout.
 * The request timer is cancelled, the AOB owner is notified via
 * scm_irq_handler() and any waiter on private->completion (set up by
 * eadm_quiesce()) is woken.
 *
 * An interrupt while not in EADM_BUSY state is unsolicited: the
 * subchannel is marked not-operational and scheduled for evaluation.
 */
static void eadm_subchannel_irq(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
	struct irb *irb = this_cpu_ptr(&cio_irb);
	blk_status_t error = BLK_STS_OK;

	EADM_LOG(6, "irq");
	EADM_LOG_HEX(6, irb, sizeof(*irb));

	inc_irq_stat(IRQIO_ADM);

	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
		error = BLK_STS_IOERR;

	/* Clear function pending: the request was cancelled by timeout. */
	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
		error = BLK_STS_TIMEOUT;

	eadm_subchannel_set_timeout(sch, 0);

	if (private->state != EADM_BUSY) {
		/* Unsolicited interrupt - have the css layer re-evaluate. */
		EADM_LOG(1, "irq unsol");
		EADM_LOG_HEX(1, irb, sizeof(*irb));
		private->state = EADM_NOT_OPER;
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
		return;
	}
	scm_irq_handler(dma32_to_virt(scsw->aob), error);
	private->state = EADM_IDLE;

	/* Wake up eadm_quiesce() if it is waiting for this request. */
	if (private->completion)
		complete(private->completion);
}
/*
 * Find an idle eadm subchannel and claim it.
 *
 * Walks eadm_list under list_lock; each candidate's state is checked
 * under its own subchannel lock.  The first subchannel in EADM_IDLE
 * state is switched to EADM_BUSY and moved to the tail of the list
 * (round-robin use of the available subchannels).
 *
 * Returns the claimed subchannel, or NULL if none is idle.
 */
static struct subchannel *eadm_get_idle_sch(void)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry(private, &eadm_list, head) {
		sch = private->sch;
		spin_lock(&sch->lock);
		if (private->state == EADM_IDLE) {
			private->state = EADM_BUSY;
			list_move_tail(&private->head, &eadm_list);
			spin_unlock(&sch->lock);
			spin_unlock_irqrestore(&list_lock, flags);

			return sch;
		}
		spin_unlock(&sch->lock);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	return NULL;
}
/*
 * eadm_start_aob() - start an AOB on any idle eadm subchannel.
 * @aob: the asynchronous operation block to execute
 *
 * Claims an idle subchannel, arms the EADM_TIMEOUT watchdog and issues
 * the start.  If the start fails the timer is cancelled, the subchannel
 * is marked not-operational and scheduled for css evaluation.
 *
 * Returns 0 on success, -EBUSY when no idle subchannel is available or
 * the subchannel is busy/status-pending, -ENODEV when it is not
 * operational.
 */
int eadm_start_aob(struct aob *aob)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;
	int ret;

	sch = eadm_get_idle_sch();
	if (!sch)
		return -EBUSY;

	spin_lock_irqsave(&sch->lock, flags);
	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
	ret = eadm_subchannel_start(sch, aob);
	if (!ret)
		goto out_unlock;

	/* Handle start subchannel failure. */
	eadm_subchannel_set_timeout(sch, 0);
	private = get_eadm_private(sch);
	private->state = EADM_NOT_OPER;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);

out_unlock:
	spin_unlock_irqrestore(&sch->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);
/*
 * Probe callback: set up driver-private state for a new eadm subchannel.
 *
 * Allocates the private structure (GFP_DMA — presumably because parts
 * of it, e.g. the embedded ORB, must be 31-bit addressable; confirm
 * against the channel subsystem requirements), initializes the timeout
 * timer, enables the subchannel on the eadm ISC and finally publishes
 * it on eadm_list.
 *
 * Returns 0 on success or a negative errno (allocation or enable
 * failure); on failure all partial state is rolled back.
 */
static int eadm_subchannel_probe(struct subchannel *sch)
{
	struct eadm_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	INIT_LIST_HEAD(&private->head);
	timer_setup(&private->timer, eadm_subchannel_timeout, 0);

	spin_lock_irq(&sch->lock);
	set_eadm_private(sch, private);
	private->state = EADM_IDLE;
	private->sch = sch;
	sch->isc = EADM_SCH_ISC;
	ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (ret) {
		set_eadm_private(sch, NULL);
		spin_unlock_irq(&sch->lock);
		kfree(private);
		goto out;
	}
	spin_unlock_irq(&sch->lock);

	spin_lock_irq(&list_lock);
	list_add(&private->head, &eadm_list);
	spin_unlock_irq(&list_lock);
out:
	return ret;
}
/*
 * Quiesce an eadm subchannel: cancel any running request and disable
 * the subchannel.
 *
 * If a request is in flight (EADM_BUSY), a clear is issued and we wait
 * on an on-stack completion that eadm_subchannel_irq() signals when the
 * resulting interrupt arrives.  If the subchannel is not busy, or the
 * clear itself fails, we skip straight to disabling.  Disabling is
 * retried while it reports -EBUSY.
 */
static void eadm_quiesce(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	spin_lock_irq(&sch->lock);
	if (private->state != EADM_BUSY)
		goto disable;

	if (eadm_subchannel_clear(sch))
		goto disable;

	private->completion = &completion;
	spin_unlock_irq(&sch->lock);

	/* Wait for the interrupt handler to complete the cleared request. */
	wait_for_completion_io(&completion);

	spin_lock_irq(&sch->lock);
	private->completion = NULL;

disable:
	eadm_subchannel_set_timeout(sch, 0);
	do {
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	spin_unlock_irq(&sch->lock);
}
/*
 * Remove callback: unpublish the subchannel, quiesce it and free the
 * driver-private data.
 *
 * The list removal happens first so eadm_get_idle_sch() can no longer
 * hand out this subchannel while it is being torn down.
 */
static void eadm_subchannel_remove(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	spin_lock_irq(&list_lock);
	list_del(&private->head);
	spin_unlock_irq(&list_lock);

	eadm_quiesce(sch);

	spin_lock_irq(&sch->lock);
	set_eadm_private(sch, NULL);
	spin_unlock_irq(&sch->lock);

	kfree(private);
}
/* Shutdown callback: just quiesce; no state is freed on shutdown. */
static void eadm_subchannel_shutdown(struct subchannel *sch)
{
	eadm_quiesce(sch);
}
/**
 * eadm_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
	struct eadm_private *private;
	unsigned long flags;

	spin_lock_irqsave(&sch->lock, flags);
	/* Nothing to do for unregistered devices or if a todo is queued. */
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	/* Device-number validity lost: schedule unregistration. */
	if (cio_update_schib(sch)) {
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		goto out_unlock;
	}
	/* The subchannel became operational again: make it usable. */
	private = get_eadm_private(sch);
	if (private->state == EADM_NOT_OPER)
		private->state = EADM_IDLE;

out_unlock:
	spin_unlock_irqrestore(&sch->lock, flags);

	return 0;
}
/* Match all ADM-type subchannels. */
static struct css_device_id eadm_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
/* css driver glue: binds the callbacks above to ADM subchannels. */
static struct css_driver eadm_subchannel_driver = {
	.drv = {
		.name = "eadm_subchannel",
		.owner = THIS_MODULE,
	},
	.subchannel_type = eadm_subchannel_ids,
	.irq = eadm_subchannel_irq,
	.probe = eadm_subchannel_probe,
	.remove = eadm_subchannel_remove,
	.shutdown = eadm_subchannel_shutdown,
	.sch_event = eadm_subchannel_sch_event,
};
/*
 * Module init: bail out if the machine does not offer eadm, then set up
 * the debug log, register the eadm interruption subclass and finally
 * the css driver.  On driver-registration failure the isc and debug
 * registrations are rolled back in reverse order.
 */
static int __init eadm_sch_init(void)
{
	int ret;

	if (!css_general_characteristics.eadm)
		return -ENXIO;

	eadm_debug = debug_register("eadm_log", 16, 1, 16);
	if (!eadm_debug)
		return -ENOMEM;

	debug_register_view(eadm_debug, &debug_hex_ascii_view);
	debug_set_level(eadm_debug, 2);

	isc_register(EADM_SCH_ISC);

	ret = css_driver_register(&eadm_subchannel_driver);
	if (ret)
		goto cleanup;

	return ret;

cleanup:
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
	return ret;
}
/* Module exit: tear down in reverse order of eadm_sch_init(). */
static void __exit eadm_sch_exit(void)
{
	css_driver_unregister(&eadm_subchannel_driver);
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);