/* drivers/s390/cio/device_ops.c */
  1. // SPDX-License-Identifier: GPL-1.0+
  2. /*
  3. * Copyright IBM Corp. 2002, 2009
  4. *
  5. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  6. * Cornelia Huck (cornelia.huck@de.ibm.com)
  7. */
  8. #include <linux/export.h>
  9. #include <linux/init.h>
  10. #include <linux/errno.h>
  11. #include <linux/slab.h>
  12. #include <linux/list.h>
  13. #include <linux/device.h>
  14. #include <linux/delay.h>
  15. #include <linux/completion.h>
  16. #include <asm/ccwdev.h>
  17. #include <asm/idals.h>
  18. #include <asm/chpid.h>
  19. #include <asm/fcx.h>
  20. #include "cio.h"
  21. #include "cio_debug.h"
  22. #include "css.h"
  23. #include "chsc.h"
  24. #include "device.h"
  25. #include "chp.h"
  26. /**
  27. * ccw_device_set_options_mask() - set some options and unset the rest
  28. * @cdev: device for which the options are to be set
  29. * @flags: options to be set
  30. *
  31. * All flags specified in @flags are set, all flags not specified in @flags
  32. * are cleared.
  33. * Returns:
  34. * %0 on success, -%EINVAL on an invalid flag combination.
  35. */
  36. int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
  37. {
  38. /*
  39. * The flag usage is mutal exclusive ...
  40. */
  41. if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
  42. (flags & CCWDEV_REPORT_ALL))
  43. return -EINVAL;
  44. cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
  45. cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
  46. cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
  47. cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
  48. cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
  49. return 0;
  50. }
/**
 * ccw_device_set_options() - set some options
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, the remainder is left untouched.
 * Returns:
 *  %0 on success, -%EINVAL if an invalid flag combination would ensue.
 */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The fast (early notification) and repall (report all) options are
	 * mutually exclusive: reject combinations that would end up with both
	 * set, either within @flags itself or together with an option that
	 * was already set earlier.
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	/* OR-in the requested bits; previously set options stay set. */
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}
  79. /**
  80. * ccw_device_clear_options() - clear some options
  81. * @cdev: device for which the options are to be cleared
  82. * @flags: options to be cleared
  83. *
  84. * All flags specified in @flags are cleared, the remainder is left untouched.
  85. */
  86. void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
  87. {
  88. cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
  89. cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
  90. cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
  91. cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
  92. cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
  93. }
  94. /**
  95. * ccw_device_is_pathgroup() - determine if paths to this device are grouped
  96. * @cdev: ccw device
  97. *
  98. * Return non-zero if there is a path group, zero otherwise.
  99. */
  100. int ccw_device_is_pathgroup(struct ccw_device *cdev)
  101. {
  102. return cdev->private->flags.pgroup;
  103. }
  104. EXPORT_SYMBOL(ccw_device_is_pathgroup);
  105. /**
  106. * ccw_device_is_multipath() - determine if device is operating in multipath mode
  107. * @cdev: ccw device
  108. *
  109. * Return non-zero if device is operating in multipath mode, zero otherwise.
  110. */
  111. int ccw_device_is_multipath(struct ccw_device *cdev)
  112. {
  113. return cdev->private->flags.mpath;
  114. }
  115. EXPORT_SYMBOL(ccw_device_is_multipath);
/**
 * ccw_device_clear() - terminate I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter to be returned upon conclusion of csch
 *
 * ccw_device_clear() calls csch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	/* The subchannel must be enabled before csch can be issued. */
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	/* Clearing only makes sense while I/O or sensing is in progress. */
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	ret = cio_clear(sch);
	if (ret == 0)
		/* Remember the intparm to echo back in the resulting irb. */
		cdev->private->intparm = intparm;
	return ret;
}
/**
 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * The interruption handler will echo back the @intparm specified here, unless
 * another interruption parameter is specified by a subsequent invocation of
 * ccw_device_halt() or ccw_device_clear().
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
				 unsigned long intparm, __u8 lpm, __u8 key,
				 unsigned long flags, int expires)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	/* The subchannel must be enabled for I/O. */
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	/*
	 * While path verification is in progress (or pending), the request
	 * is not started; instead it is completed later via a faked irb.
	 */
	if (cdev->private->state == DEV_STATE_VERIFY ||
	    cdev->private->flags.doverify) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
			cdev->private->intparm = intparm;
			CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, intparm,
				      cdev->private->flags.fake_irb);
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	/* Refuse when not online, or when primary status is still pending. */
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
		return -EBUSY;
	ret = cio_set_options (sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key (sch, cpa, lpm, key);
	switch (ret) {
	case 0:
		cdev->private->intparm = intparm;
		if (expires)
			ccw_device_set_timeout(cdev, expires);
		break;
	case -EACCES:
	case -ENODEV:
		/* Path problems: have the fsm re-verify the paths. */
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		break;
	}
	return ret;
}
  235. /**
  236. * ccw_device_start_key() - start a s390 channel program with key
  237. * @cdev: target ccw device
  238. * @cpa: logical start address of channel program
  239. * @intparm: user specific interruption parameter; will be presented back to
  240. * @cdev's interrupt handler. Allows a device driver to associate
  241. * the interrupt with a particular I/O request.
  242. * @lpm: defines the channel path to be used for a specific I/O request. A
  243. * value of 0 will make cio use the opm.
  244. * @key: storage key to be used for the I/O
  245. * @flags: additional flags; defines the action to be performed for I/O
  246. * processing.
  247. *
  248. * Start a S/390 channel program. When the interrupt arrives, the
  249. * IRQ handler is called, either immediately, delayed (dev-end missing,
  250. * or sense required) or never (no IRQ handler registered).
  251. * The interruption handler will echo back the @intparm specified here, unless
  252. * another interruption parameter is specified by a subsequent invocation of
  253. * ccw_device_halt() or ccw_device_clear().
  254. * Returns:
  255. * %0, if the operation was successful;
  256. * -%EBUSY, if the device is busy, or status pending;
  257. * -%EACCES, if no path specified in @lpm is operational;
  258. * -%ENODEV, if the device is not operational.
  259. * Context:
  260. * Interrupts disabled, ccw device lock held
  261. */
  262. int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
  263. unsigned long intparm, __u8 lpm, __u8 key,
  264. unsigned long flags)
  265. {
  266. return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
  267. flags, 0);
  268. }
  269. /**
  270. * ccw_device_start() - start a s390 channel program
  271. * @cdev: target ccw device
  272. * @cpa: logical start address of channel program
  273. * @intparm: user specific interruption parameter; will be presented back to
  274. * @cdev's interrupt handler. Allows a device driver to associate
  275. * the interrupt with a particular I/O request.
  276. * @lpm: defines the channel path to be used for a specific I/O request. A
  277. * value of 0 will make cio use the opm.
  278. * @flags: additional flags; defines the action to be performed for I/O
  279. * processing.
  280. *
  281. * Start a S/390 channel program. When the interrupt arrives, the
  282. * IRQ handler is called, either immediately, delayed (dev-end missing,
  283. * or sense required) or never (no IRQ handler registered).
  284. * The interruption handler will echo back the @intparm specified here, unless
  285. * another interruption parameter is specified by a subsequent invocation of
  286. * ccw_device_halt() or ccw_device_clear().
  287. * Returns:
  288. * %0, if the operation was successful;
  289. * -%EBUSY, if the device is busy, or status pending;
  290. * -%EACCES, if no path specified in @lpm is operational;
  291. * -%ENODEV, if the device is not operational.
  292. * Context:
  293. * Interrupts disabled, ccw device lock held
  294. */
  295. int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
  296. unsigned long intparm, __u8 lpm, unsigned long flags)
  297. {
  298. return ccw_device_start_key(cdev, cpa, intparm, lpm,
  299. PAGE_DEFAULT_KEY, flags);
  300. }
  301. /**
  302. * ccw_device_start_timeout() - start a s390 channel program with timeout
  303. * @cdev: target ccw device
  304. * @cpa: logical start address of channel program
  305. * @intparm: user specific interruption parameter; will be presented back to
  306. * @cdev's interrupt handler. Allows a device driver to associate
  307. * the interrupt with a particular I/O request.
  308. * @lpm: defines the channel path to be used for a specific I/O request. A
  309. * value of 0 will make cio use the opm.
  310. * @flags: additional flags; defines the action to be performed for I/O
  311. * processing.
  312. * @expires: timeout value in jiffies
  313. *
  314. * Start a S/390 channel program. When the interrupt arrives, the
  315. * IRQ handler is called, either immediately, delayed (dev-end missing,
  316. * or sense required) or never (no IRQ handler registered).
  317. * This function notifies the device driver if the channel program has not
  318. * completed during the time specified by @expires. If a timeout occurs, the
  319. * channel program is terminated via xsch, hsch or csch, and the device's
  320. * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
  321. * The interruption handler will echo back the @intparm specified here, unless
  322. * another interruption parameter is specified by a subsequent invocation of
  323. * ccw_device_halt() or ccw_device_clear().
  324. * Returns:
  325. * %0, if the operation was successful;
  326. * -%EBUSY, if the device is busy, or status pending;
  327. * -%EACCES, if no path specified in @lpm is operational;
  328. * -%ENODEV, if the device is not operational.
  329. * Context:
  330. * Interrupts disabled, ccw device lock held
  331. */
  332. int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
  333. unsigned long intparm, __u8 lpm,
  334. unsigned long flags, int expires)
  335. {
  336. return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
  337. PAGE_DEFAULT_KEY, flags,
  338. expires);
  339. }
/**
 * ccw_device_halt() - halt I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter to be returned upon conclusion of hsch
 *
 * ccw_device_halt() calls hsch on @cdev's subchannel.
 * The interruption handler will echo back the @intparm specified here, unless
 * another interruption parameter is specified by a subsequent invocation of
 * ccw_device_clear().
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	/* The subchannel must be enabled before hsch can be issued. */
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	/* Halting only makes sense while I/O or sensing is in progress. */
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	ret = cio_halt(sch);
	if (ret == 0)
		/* Remember the intparm to echo back in the resulting irb. */
		cdev->private->intparm = intparm;
	return ret;
}
/**
 * ccw_device_resume() - resume channel program execution
 * @cdev: target ccw device
 *
 * ccw_device_resume() calls rsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	/* The subchannel must be enabled before rsch can be issued. */
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	/* Resuming requires an online device whose channel program is suspended. */
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}
  404. /**
  405. * ccw_device_get_ciw() - Search for CIW command in extended sense data.
  406. * @cdev: ccw device to inspect
  407. * @ct: command type to look for
  408. *
  409. * During SenseID, command information words (CIWs) describing special
  410. * commands available to the device may have been stored in the extended
  411. * sense data. This function searches for CIWs of a specified command
  412. * type in the extended sense data.
  413. * Returns:
  414. * %NULL if no extended sense data has been stored or if no CIW of the
  415. * specified command type could be found,
  416. * else a pointer to the CIW of the specified command type.
  417. */
  418. struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
  419. {
  420. int ciw_cnt;
  421. if (cdev->private->flags.esid == 0)
  422. return NULL;
  423. for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
  424. if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
  425. return cdev->private->dma_area->senseid.ciw + ciw_cnt;
  426. return NULL;
  427. }
  428. /**
  429. * ccw_device_get_path_mask() - get currently available paths
  430. * @cdev: ccw device to be queried
  431. * Returns:
  432. * %0 if no subchannel for the device is available,
  433. * else the mask of currently available paths for the ccw device's subchannel.
  434. */
  435. __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
  436. {
  437. struct subchannel *sch;
  438. if (!cdev->dev.parent)
  439. return 0;
  440. sch = to_subchannel(cdev->dev.parent);
  441. return sch->lpm;
  442. }
  443. /**
  444. * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
  445. * @cdev: device to obtain the descriptor for
  446. * @chp_idx: index of the channel path
  447. *
  448. * On success return a newly allocated copy of the channel-path description
  449. * data associated with the given channel path. Return %NULL on error.
  450. */
  451. struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
  452. int chp_idx)
  453. {
  454. struct subchannel *sch;
  455. struct chp_id chpid;
  456. sch = to_subchannel(cdev->dev.parent);
  457. chp_id_init(&chpid);
  458. chpid.id = sch->schib.pmcw.chpid[chp_idx];
  459. return chp_get_chp_desc(chpid);
  460. }
  461. /**
  462. * ccw_device_get_util_str() - return newly allocated utility strings
  463. * @cdev: device to obtain the utility strings for
  464. * @chp_idx: index of the channel path
  465. *
  466. * On success return a newly allocated copy of the utility strings
  467. * associated with the given channel path. Return %NULL on error.
  468. */
  469. u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
  470. {
  471. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  472. struct channel_path *chp;
  473. struct chp_id chpid;
  474. u8 *util_str;
  475. chp_id_init(&chpid);
  476. chpid.id = sch->schib.pmcw.chpid[chp_idx];
  477. chp = chpid_to_chp(chpid);
  478. util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
  479. if (!util_str)
  480. return NULL;
  481. mutex_lock(&chp->lock);
  482. memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
  483. mutex_unlock(&chp->lock);
  484. return util_str;
  485. }
/**
 * ccw_device_get_id() - obtain a ccw device id
 * @cdev: device to obtain the id for
 * @dev_id: where to fill in the values
 */
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
	/* Plain struct copy into the caller-provided buffer. */
	*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);
/**
 * ccw_device_tm_start_timeout_key() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
				    unsigned long intparm, u8 lpm, u8 key,
				    int expires)
{
	struct subchannel *sch;
	int rc;

	sch = to_subchannel(cdev->dev.parent);
	/* The subchannel must be enabled for I/O. */
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	/*
	 * While path verification is in progress the request is not started;
	 * instead it is completed later via a faked irb.
	 */
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_TM_IRB;
			cdev->private->intparm = intparm;
			CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, intparm,
				      cdev->private->flags.fake_irb);
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	rc = cio_tm_start_key(sch, tcw, lpm, key);
	if (rc == 0) {
		/* Remember the intparm and arm the optional timeout. */
		cdev->private->intparm = intparm;
		if (expires)
			ccw_device_set_timeout(cdev, expires);
	}
	return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
  548. /**
  549. * ccw_device_tm_start_key() - perform start function
  550. * @cdev: ccw device on which to perform the start function
  551. * @tcw: transport-command word to be started
  552. * @intparm: user defined parameter to be passed to the interrupt handler
  553. * @lpm: mask of paths to use
  554. * @key: storage key to use for storage access
  555. *
  556. * Start the tcw on the given ccw device. Return zero on success, non-zero
  557. * otherwise.
  558. */
  559. int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
  560. unsigned long intparm, u8 lpm, u8 key)
  561. {
  562. return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
  563. }
  564. EXPORT_SYMBOL(ccw_device_tm_start_key);
  565. /**
  566. * ccw_device_tm_start() - perform start function
  567. * @cdev: ccw device on which to perform the start function
  568. * @tcw: transport-command word to be started
  569. * @intparm: user defined parameter to be passed to the interrupt handler
  570. * @lpm: mask of paths to use
  571. *
  572. * Start the tcw on the given ccw device. Return zero on success, non-zero
  573. * otherwise.
  574. */
  575. int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
  576. unsigned long intparm, u8 lpm)
  577. {
  578. return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
  579. PAGE_DEFAULT_KEY);
  580. }
  581. EXPORT_SYMBOL(ccw_device_tm_start);
  582. /**
  583. * ccw_device_tm_start_timeout() - perform start function
  584. * @cdev: ccw device on which to perform the start function
  585. * @tcw: transport-command word to be started
  586. * @intparm: user defined parameter to be passed to the interrupt handler
  587. * @lpm: mask of paths to use
  588. * @expires: time span in jiffies after which to abort request
  589. *
  590. * Start the tcw on the given ccw device. Return zero on success, non-zero
  591. * otherwise.
  592. */
  593. int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
  594. unsigned long intparm, u8 lpm, int expires)
  595. {
  596. return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
  597. PAGE_DEFAULT_KEY, expires);
  598. }
  599. EXPORT_SYMBOL(ccw_device_tm_start_timeout);
/**
 * ccw_device_get_mdc() - accumulate max data count
 * @cdev: ccw device for which the max data count is accumulated
 * @mask: mask of paths to use
 *
 * Return the number of 64K-bytes blocks all paths at least support
 * for a transport command. Return value 0 indicates failure.
 */
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	int mdc = 0, i;

	/* Adjust requested path mask to excluded varied off paths. */
	if (mask)
		mask &= sch->lpm;
	else
		mask = sch->lpm;
	chp_id_init(&chpid);
	/* Walk all eight possible paths, skipping those not in the mask. */
	for (i = 0; i < 8; i++) {
		if (!(mask & (0x80 >> i)))
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;
		/* desc_fmt1 is accessed under chp->lock. */
		mutex_lock(&chp->lock);
		if (!chp->desc_fmt1.f) {
			/*
			 * No usable data for this path - presumably the f bit
			 * flags validity of the fmt1 fields (TODO confirm);
			 * the overall minimum cannot be trusted, so fail.
			 */
			mutex_unlock(&chp->lock);
			return 0;
		}
		/*
		 * If the r bit is clear, cap the accumulated value at 1
		 * before taking the minimum - NOTE(review): exact r-bit
		 * semantics to be confirmed against the fmt1 descriptor.
		 */
		if (!chp->desc_fmt1.r)
			mdc = 1;
		/* Accumulate the minimum mdc over all considered paths. */
		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
			    chp->desc_fmt1.mdc;
		mutex_unlock(&chp->lock);
	}
	return mdc;
}
EXPORT_SYMBOL(ccw_device_get_mdc);
/**
 * ccw_device_tm_intrg() - perform interrogate function
 * @cdev: ccw device on which to perform the interrogate function
 *
 * Perform an interrogate function on the given ccw device. Return zero on
 * success, non-zero otherwise.
 */
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/* The subchannel must be enabled for I/O. */
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	/* Requires a transport-mode subchannel with a start function pending. */
	if (!scsw_is_tm(&sch->schib.scsw) ||
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
		return -EINVAL;
	return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
  661. /**
  662. * ccw_device_get_schid() - obtain a subchannel id
  663. * @cdev: device to obtain the id for
  664. * @schid: where to fill in the values
  665. */
  666. void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
  667. {
  668. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  669. *schid = sch->schid;
  670. }
  671. EXPORT_SYMBOL_GPL(ccw_device_get_schid);
  672. /**
  673. * ccw_device_pnso() - Perform Network-Subchannel Operation
  674. * @cdev: device on which PNSO is performed
  675. * @pnso_area: request and response block for the operation
  676. * @oc: Operation Code
  677. * @resume_token: resume token for multiblock response
  678. * @cnc: Boolean change-notification control
  679. *
  680. * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
  681. *
  682. * Returns 0 on success.
  683. */
  684. int ccw_device_pnso(struct ccw_device *cdev,
  685. struct chsc_pnso_area *pnso_area, u8 oc,
  686. struct chsc_pnso_resume_token resume_token, int cnc)
  687. {
  688. struct subchannel_id schid;
  689. ccw_device_get_schid(cdev, &schid);
  690. return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
  691. }
  692. EXPORT_SYMBOL_GPL(ccw_device_pnso);
  693. /**
  694. * ccw_device_get_cssid() - obtain Channel Subsystem ID
  695. * @cdev: device to obtain the CSSID for
  696. * @cssid: The resulting Channel Subsystem ID
  697. */
  698. int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
  699. {
  700. struct device *sch_dev = cdev->dev.parent;
  701. struct channel_subsystem *css = to_css(sch_dev->parent);
  702. if (css->id_valid)
  703. *cssid = css->cssid;
  704. return css->id_valid ? 0 : -ENODEV;
  705. }
  706. EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
  707. /**
  708. * ccw_device_get_iid() - obtain MIF-image ID
  709. * @cdev: device to obtain the MIF-image ID for
  710. * @iid: The resulting MIF-image ID
  711. */
  712. int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
  713. {
  714. struct device *sch_dev = cdev->dev.parent;
  715. struct channel_subsystem *css = to_css(sch_dev->parent);
  716. if (css->id_valid)
  717. *iid = css->iid;
  718. return css->id_valid ? 0 : -ENODEV;
  719. }
  720. EXPORT_SYMBOL_GPL(ccw_device_get_iid);
  721. /**
  722. * ccw_device_get_chpid() - obtain Channel Path ID
  723. * @cdev: device to obtain the Channel Path ID for
  724. * @chp_idx: Index of the channel path
  725. * @chpid: The resulting Channel Path ID
  726. */
  727. int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
  728. {
  729. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  730. int mask;
  731. if ((chp_idx < 0) || (chp_idx > 7))
  732. return -EINVAL;
  733. mask = 0x80 >> chp_idx;
  734. if (!(sch->schib.pmcw.pim & mask))
  735. return -ENODEV;
  736. *chpid = sch->schib.pmcw.chpid[chp_idx];
  737. return 0;
  738. }
  739. EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
/**
 * ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
 * @cdev: device to obtain the Channel ID for
 * @chp_idx: Index of the channel path
 * @chid: The resulting Channel ID
 */
int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
{
	struct chp_id cssid_chpid;
	struct channel_path *chp;
	int rc;

	chp_id_init(&cssid_chpid);
	/* Resolve the path index to a chpid first. */
	rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
	if (rc)
		return rc;
	chp = chpid_to_chp(cssid_chpid);
	if (!chp)
		return -ENODEV;
	/* desc_fmt1 is accessed under chp->lock. */
	mutex_lock(&chp->lock);
	/*
	 * Flag bit 0x10 presumably marks the CHID field of the fmt1
	 * descriptor as valid - TODO confirm against the descriptor layout.
	 */
	if (chp->desc_fmt1.flags & 0x10)
		*chid = chp->desc_fmt1.chid;
	else
		rc = -ENODEV;
	mutex_unlock(&chp->lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ccw_device_get_chid);
/*
 * Allocate zeroed dma coherent 31 bit addressable memory using
 * the subchannels dma pool. Maximal size of allocation supported
 * is PAGE_SIZE.
 */
void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size,
			    dma32_t *dma_handle)
{
	void *addr;

	/*
	 * Hold a device reference for the lifetime of the allocation;
	 * it is dropped again by ccw_device_dma_free().
	 */
	if (!get_device(&cdev->dev))
		return NULL;
	addr = __cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size, dma_handle);
	if (IS_ERR_OR_NULL(addr))
		/* Allocation failed - drop the reference taken above. */
		put_device(&cdev->dev);
	return addr;
}
EXPORT_SYMBOL(ccw_device_dma_zalloc);
  784. void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
  785. {
  786. if (!cpu_addr)
  787. return;
  788. cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
  789. put_device(&cdev->dev);
  790. }
  791. EXPORT_SYMBOL(ccw_device_dma_free);
/* Exported entry points of the basic ccw device I/O API. */
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(ccw_device_get_util_str);