// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 */
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"

static struct workqueue_struct *tpm_dev_wq;
/*
 * Send a raw command from the user-space buffer to the TPM and leave the
 * response in the same buffer.
 *
 * Returns the response length on success or a negative errno.  Note the
 * synthesized-response path: when tpm2_prepare_space() reports the command
 * as unimplemented (-EOPNOTSUPP), a minimal header carrying
 * TPM2_RC_COMMAND_CODE is written into @buf and its (positive) size is
 * returned directly via the out_rc path, skipping transmission entirely.
 */
static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
				u8 *buf, size_t bufsiz)
{
	struct tpm_header *header = (void *)buf;
	ssize_t ret, len;

	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		tpm2_end_auth_session(chip);

	ret = tpm2_prepare_space(chip, space, buf, bufsiz);
	/* If the command is not implemented by the TPM, synthesize a
	 * response with a TPM2_RC_COMMAND_CODE return for user-space.
	 */
	if (ret == -EOPNOTSUPP) {
		header->length = cpu_to_be32(sizeof(*header));
		header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
		header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
						  TSS2_RESMGR_TPM_RC_LAYER);
		ret = sizeof(*header);
	}
	/* Any non-zero ret here (error or synthesized header size) bypasses
	 * transmission and space commit.
	 */
	if (ret)
		goto out_rc;

	len = tpm_transmit(chip, buf, bufsiz);
	if (len < 0)
		ret = len;

	/* Commit the session/object state only after a successful transmit;
	 * on failure the space is flushed instead.
	 */
	if (!ret)
		ret = tpm2_commit_space(chip, space, buf, &len);
	else
		tpm2_flush_space(chip);

out_rc:
	return ret ? ret : len;
}
/*
 * Worker for O_NONBLOCK writes: runs the command that tpm_common_write()
 * queued and leaves the result in priv->data_buffer for a later read.
 */
static void tpm_dev_async_work(struct work_struct *work)
{
	struct file_priv *priv =
		container_of(work, struct file_priv, async_work);
	ssize_t ret;

	mutex_lock(&priv->buffer_mutex);
	priv->command_enqueued = false;

	/* Non-zero means the ops could not be taken (chip going away);
	 * stash the error in response_length so the next read returns it.
	 */
	ret = tpm_try_get_ops(priv->chip);
	if (ret) {
		priv->response_length = ret;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	/*
	 * If ret is > 0 then tpm_dev_transmit returned the size of the
	 * response. If ret is < 0 then tpm_dev_transmit failed and
	 * returned an error code.
	 */
	if (ret != 0) {
		priv->response_length = ret;
		/* Arm the deprecated 120 s discard timer for unread data. */
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
	}
out:
	mutex_unlock(&priv->buffer_mutex);
	wake_up_interruptible(&priv->async_wait);
}
  80. static void user_reader_timeout(struct timer_list *t)
  81. {
  82. struct file_priv *priv = from_timer(priv, t, user_read_timer);
  83. pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
  84. task_tgid_nr(current));
  85. schedule_work(&priv->timeout_work);
  86. }
  87. static void tpm_timeout_work(struct work_struct *work)
  88. {
  89. struct file_priv *priv = container_of(work, struct file_priv,
  90. timeout_work);
  91. mutex_lock(&priv->buffer_mutex);
  92. priv->response_read = true;
  93. priv->response_length = 0;
  94. memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
  95. mutex_unlock(&priv->buffer_mutex);
  96. wake_up_interruptible(&priv->async_wait);
  97. }
  98. void tpm_common_open(struct file *file, struct tpm_chip *chip,
  99. struct file_priv *priv, struct tpm_space *space)
  100. {
  101. priv->chip = chip;
  102. priv->space = space;
  103. priv->response_read = true;
  104. mutex_init(&priv->buffer_mutex);
  105. timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
  106. INIT_WORK(&priv->timeout_work, tpm_timeout_work);
  107. INIT_WORK(&priv->async_work, tpm_dev_async_work);
  108. init_waitqueue_head(&priv->async_wait);
  109. file->private_data = priv;
  110. }
/*
 * Copy the buffered TPM response out to user space.  Partial reads are
 * supported: *off is the cursor into data_buffer and response_length
 * counts the bytes still unread.  Consumed bytes are zeroized.
 *
 * Returns the number of bytes copied, 0 when nothing is pending, or a
 * negative errno (including one stashed by the async worker).
 */
ssize_t tpm_common_read(struct file *file, char __user *buf,
			size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	ssize_t ret_size = 0;
	int rc;

	mutex_lock(&priv->buffer_mutex);

	if (priv->response_length) {
		priv->response_read = true;

		/* response_length may hold a negative errno stashed by
		 * tpm_dev_async_work(); min_t() passes it through and the
		 * branch below resets the state and returns it.
		 */
		ret_size = min_t(ssize_t, size, priv->response_length);
		if (ret_size <= 0) {
			priv->response_length = 0;
			goto out;
		}

		rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
		if (rc) {
			/* Fault: scrub the whole buffer and drop the rest. */
			memset(priv->data_buffer, 0, TPM_BUFSIZE);
			priv->response_length = 0;
			ret_size = -EFAULT;
		} else {
			/* Zeroize the consumed part and advance the cursor. */
			memset(priv->data_buffer + *off, 0, ret_size);
			priv->response_length -= ret_size;
			*off += ret_size;
		}
	}

out:
	if (!priv->response_length) {
		/* Fully consumed: rewind and cancel the discard timeout. */
		*off = 0;
		del_timer_sync(&priv->user_read_timer);
		flush_work(&priv->timeout_work);
	}
	mutex_unlock(&priv->buffer_mutex);
	return ret_size;
}
/*
 * Accept one complete TPM command from user space and either run it
 * synchronously, or (O_NONBLOCK) queue it on tpm_dev_wq and return
 * immediately with the result delivered via a later read.
 *
 * Returns @size on success or a negative errno.
 */
ssize_t tpm_common_write(struct file *file, const char __user *buf,
			 size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	int ret = 0;

	if (size > TPM_BUFSIZE)
		return -E2BIG;

	mutex_lock(&priv->buffer_mutex);

	/* Cannot perform a write until the read has cleared either via
	 * tpm_read or a user_read_timer timeout. This also prevents split
	 * buffered writes from blocking here.
	 */
	if ((!priv->response_read && priv->response_length) ||
	    priv->command_enqueued) {
		ret = -EBUSY;
		goto out;
	}

	if (copy_from_user(priv->data_buffer, buf, size)) {
		ret = -EFAULT;
		goto out;
	}

	/* Reject a write shorter than a TPM header or shorter than the
	 * big-endian length field embedded at byte offset 2 of the header.
	 */
	if (size < 6 ||
	    size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
		ret = -EINVAL;
		goto out;
	}

	priv->response_length = 0;
	priv->response_read = false;
	*off = 0;

	/*
	 * If in nonblocking mode schedule an async job to send
	 * the command return the size.
	 * In case of error the err code will be returned in
	 * the subsequent read call.
	 */
	if (file->f_flags & O_NONBLOCK) {
		priv->command_enqueued = true;
		queue_work(tpm_dev_wq, &priv->async_work);
		mutex_unlock(&priv->buffer_mutex);
		return size;
	}

	/* atomic tpm command send and result receive. We only hold the ops
	 * lock during this period so that the tpm can be unregistered even if
	 * the char dev is held open.
	 */
	if (tpm_try_get_ops(priv->chip)) {
		ret = -EPIPE;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	if (ret > 0) {
		/* Response is buffered; arm the deprecated discard timer
		 * and report the whole write as accepted.
		 */
		priv->response_length = ret;
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
		ret = size;
	}
out:
	mutex_unlock(&priv->buffer_mutex);
	return ret;
}
  206. __poll_t tpm_common_poll(struct file *file, poll_table *wait)
  207. {
  208. struct file_priv *priv = file->private_data;
  209. __poll_t mask = 0;
  210. poll_wait(file, &priv->async_wait, wait);
  211. mutex_lock(&priv->buffer_mutex);
  212. /*
  213. * The response_length indicates if there is still response
  214. * (or part of it) to be consumed. Partial reads decrease it
  215. * by the number of bytes read, and write resets it the zero.
  216. */
  217. if (priv->response_length)
  218. mask = EPOLLIN | EPOLLRDNORM;
  219. else
  220. mask = EPOLLOUT | EPOLLWRNORM;
  221. mutex_unlock(&priv->buffer_mutex);
  222. return mask;
  223. }
  224. /*
  225. * Called on file close
  226. */
  227. void tpm_common_release(struct file *file, struct file_priv *priv)
  228. {
  229. flush_work(&priv->async_work);
  230. del_timer_sync(&priv->user_read_timer);
  231. flush_work(&priv->timeout_work);
  232. file->private_data = NULL;
  233. priv->response_length = 0;
  234. }
  235. int __init tpm_dev_common_init(void)
  236. {
  237. tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
  238. return !tpm_dev_wq ? -ENOMEM : 0;
  239. }
  240. void __exit tpm_dev_common_exit(void)
  241. {
  242. if (tpm_dev_wq) {
  243. destroy_workqueue(tpm_dev_wq);
  244. tpm_dev_wq = NULL;
  245. }
  246. }