zcrypt_api.c

// SPDX-License-Identifier: GPL-2.0+
/*
 *  zcrypt 2.1.0
 *
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/**
 * Process a rescan of the transport layer.
 *
 * Returns 1, if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		atomic_inc(&zcrypt_rescan_count);
		ap_bus_force_rescan();
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

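/*
 * Look up the zcrypt_ops that were registered under the given msgtype
 * name and variant. Returns NULL if no matching ops are registered.
 */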
struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);

/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

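/*
 * zcrypt_pick_queue() takes module, device and queue references for the
 * selected queue and charges the request weight to the card and queue
 * load counters; zcrypt_drop_queue() reverses this accounting once the
 * request is finished. Both are called with zcrypt_list_lock held.
 */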
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

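/*
 * Helpers for device selection: return true if the candidate card or
 * queue would end up more loaded than the currently preferred one
 * (ties broken by total request count), in which case the caller
 * skips the candidate.
 */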
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) >
		       atomic64_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
		       pref_zq->queue->total_request_count;
	return weight > pref_weight;
}

/*
 * zcrypt ioctls.
 */
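/*
 * Handle an ICARSAMODEXPO request: pick the least loaded online
 * accelerator or CCA card/queue that supports the modulus size and
 * dispatch the modular exponentiation via the queue's rsa_modexpo op.
 */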
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

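/*
 * Handle an ICARSACRT request: same device selection as
 * zcrypt_rsa_modexpo(), but the operation is dispatched via the
 * queue's rsa_modexpo_crt op (RSA in CRT form).
 */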
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

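/*
 * Send a CCA CPRB (ZSECSENDCPRB): honour the card selected by the
 * caller in xcRB->user_defined (or AUTOSELECT), restrict queues to the
 * requested domain and dispatch via the queue's send_cprb op.
 * Exported for use by other kernel modules.
 */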
long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	ap_init_message(&ap_msg);
	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);

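/*
 * Helpers for EP11 target selection: check whether an adapter id or a
 * queue qid appears in the target list supplied by user space.
 */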
static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}

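/*
 * Handle a ZSENDEP11CPRB request: copy the optional target list from
 * user space (an empty list means autoselect), pick a matching online
 * EP11 card/queue and dispatch via the queue's send_ep11_cprb op.
 */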
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			func_code = 0;
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			func_code = 0;
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

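/*
 * Fetch random bytes via an online CCA card: build the RNG CPRB, pick
 * the least loaded card/queue that implements the rng op and let it
 * fill the caller's buffer.
 */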
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

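/*
 * The following helpers collect the data returned by the status
 * ioctls: a per (card, domain) device status table, plus per adapter
 * type/online masks, queue depths and request counters for the
 * default domain.
 */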
static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);

	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(int) * max_adapters);

	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

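/*
 * Main ioctl dispatcher for the z90crypt misc device. Request ioctls
 * are retried as long as the lower layer returns -EAGAIN, and once
 * more after a forced AP bus rescan if no device was found.
 */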
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t b_key;
	compat_uptr_t n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t bp_key;
	compat_uptr_t bq_key;
	compat_uptr_t np_prime;
	compat_uptr_t nq_prime;
	compat_uptr_t u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short agent_ID;
	unsigned int user_defined;
	unsigned short request_ID;
	unsigned int request_control_blk_length;
	unsigned char padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_control_blk_addr;
	unsigned int request_data_length;
	char padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_data_address;
	unsigned int reply_control_blk_length;
	char padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_control_blk_addr;
	unsigned int reply_data_length;
	char padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_data_addr;
	unsigned short priority_window;
	unsigned int status;
} __packed;

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

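/*
 * 32-bit compat entry point: the three ioctls whose payloads carry
 * pointers are translated explicitly, everything else is layout
 * compatible and handled by zcrypt_unlocked_ioctl().
 */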
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner = THIS_MODULE,
	.read = zcrypt_read,
	.write = zcrypt_write,
	.unlocked_ioctl = zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zcrypt_compat_ioctl,
#endif
	.open = zcrypt_open,
	.release = zcrypt_release,
	.llseek = no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "z90crypt",
	.fops = &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

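/*
 * hwrng data_read callback: refill the page sized buffer with a fresh
 * RNG reply whenever it runs empty and hand out one 32-bit word per
 * call.
 */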
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name = "zcrypt",
	.data_read = zcrypt_rng_data_read,
	.quality = 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);