tpm_ibmvtpm.c

/*
 * Copyright (C) 2012-2020 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq_word - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}

/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev:	vio device struct
 * @valid:	Valid field
 * @msg:	Type field
 * @len:	Length field
 * @data:	Data field
 *
 * The ibmvtpm crq is defined as follows:
 *
 * Byte  |  0  |  1  |  2  |  3  |  4  |  5  |  6  |  7
 * -----------------------------------------------------------------------
 * Word0 | Valid | Type |    Length    |             Data
 * -----------------------------------------------------------------------
 * Word1 |                         Reserved
 * -----------------------------------------------------------------------
 *
 * Which matches the following structure (on bigendian host):
 *
 * struct ibmvtpm_crq {
 *         u8 valid;
 *         u8 msg;
 *         __be16 len;
 *         __be32 data;
 *         __be64 reserved;
 * } __attribute__((packed, aligned(8)));
 *
 * However, the value is passed in a register so just compute the numeric value
 * to load into the register avoiding byteswap altogether. Endian only affects
 * memory loads and stores - registers are internally represented the same.
 *
 * Return:
 *	0 (H_SUCCESS) - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev,
		u8 valid, u8 msg, u16 len, u32 data)
{
	u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
		(u64)data;
	return ibmvtpm_send_crq_word(vdev, w1);
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 * @chip:	tpm chip struct
 * @buf:	buffer to read
 * @count:	size of buffer
 *
 * Return:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	u16 len;
	int sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
	if (sig)
		return -EINTR;

	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zd, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"%s failed rc=%d\n", __func__, rc);

	return rc;
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 * @dev:	device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer contains data to send
 * @count:	size of buffer
 *
 * Return:
 *	0 on success,
 *	-errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	bool retry = true;
	int rc, sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
			 "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = true;

again:
	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
			count, ibmvtpm->rtce_dma_handle);
	if (rc != H_SUCCESS) {
		/*
		 * H_CLOSED can be returned after LPM resume.  Call
		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
		 * ibmvtpm_send_crq() once before failing.
		 */
		if (rc == H_CLOSED && retry) {
			tpm_ibmvtpm_resume(ibmvtpm->dev);
			retry = false;
			goto again;
		}
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		ibmvtpm->tpm_processing_cmd = false;
	}

	spin_unlock(&ibmvtpm->rtce_lock);
	return 0;
}
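
/*
 * tpm_ibmvtpm_cancel - Cancel callback (no-op).
 *
 * No cancel operation is implemented for the vTPM CRQ transport, so this
 * callback intentionally does nothing.
 */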
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	return;
}
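
/*
 * tpm_ibmvtpm_status - Return the TPM status.
 *
 * The virtual TPM exposes no status register, so this always returns 0;
 * command completion is tracked via tpm_processing_cmd and the driver
 * wait queue instead.
 */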
static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 *			     (note that this is the vtpm version, not the tpm version)
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	tpm_chip_unregister(chip);
	free_irq(vdev->irq, ibmvtpm);
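
	/* Retry H_FREE_CRQ until the hypervisor is no longer busy. */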
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:	vio device struct
 *
 * Return:
 *	Number of bytes the driver needs to DMA map.
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm;

	/*
	 * ibmvtpm initializes at probe time, so the data we are
	 * asking for may not be set yet. Estimate that 4K required
	 * for TCE-mapped buffer in addition to CRQ.
	 */
	if (chip)
		ibmvtpm = dev_get_drvdata(&chip->dev);
	else
		return CRQ_RES_BUF_SIZE + PAGE_SIZE;

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:	device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 * @ibmvtpm:	ibm vtpm struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
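
/*
 * tpm_ibmvtpm_req_canceled - Report whether a request was canceled, based on
 * the status value returned by tpm_ibmvtpm_status().
 */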
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}
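
/*
 * With req_complete_mask and req_complete_val both zero, the TPM core treats
 * every command as immediately complete and proceeds straight to the recv
 * callback; tpm_ibmvtpm_recv() then sleeps on the driver wait queue until the
 * response interrupt arrives.
 */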
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 * @ibmvtpm:	vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
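		/* Make sure the valid flag is read before the entry's payload. */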
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			ibmvtpm->tpm_processing_cmd = false;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt - Interrupt handler
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		wake_up_interruptible(&ibmvtpm->crq_queue.wq);
		crq->valid = 0;
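		/* Order the valid-flag clear before any later stores. */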
		smp_wmb();
	}

	return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			     const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
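	/* The CRQ shared with the hypervisor lives in a single zeroed page. */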
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	init_waitqueue_head(&crq_q->wq);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;
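
	/*
	 * The RTCE buffer is allocated and mapped in the interrupt path once
	 * the GET_RTCE_BUFFER_SIZE response arrives; give it up to a second.
	 */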
	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
				ibmvtpm->rtce_buf != NULL,
				HZ)) {
		dev_err(dev, "CRQ response timed out\n");
		goto init_irq_cleanup;
	}

	return tpm_chip_register(chip);

init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}

static struct vio_driver ibmvtpm_driver = {
	.id_table	 = tpm_ibmvtpm_device_table,
	.probe		 = tpm_ibmvtpm_probe,
	.remove		 = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name		 = tpm_ibmvtpm_driver_name,
	.pm		 = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");