  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2016, Linaro Limited
  4. */
  5. #define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/cdev.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include "tee_private.h"
  18. #define TEE_NUM_DEVICES 32
  19. #define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))
  20. #define TEE_UUID_NS_NAME_SIZE 128
  21. /*
  22. * TEE Client UUID name space identifier (UUIDv4)
  23. *
  24. * Value here is random UUID that is allocated as name space identifier for
  25. * forming Client UUID's for TEE environment using UUIDv5 scheme.
  26. */
  27. static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
  28. 0xa1, 0xb8, 0xec, 0x4b,
  29. 0xc0, 0x8e, 0x01, 0xb6);
  30. /*
  31. * Unprivileged devices in the lower half range and privileged devices in
  32. * the upper half range.
  33. */
  34. static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
  35. static DEFINE_SPINLOCK(driver_lock);
  36. static const struct class tee_class;
  37. static dev_t tee_devt;
/**
 * teedev_open() - Open a new context towards a TEE device
 * @teedev: Device to open the context against
 *
 * Takes a usage reference on @teedev for the lifetime of the context and
 * calls the driver's open() hook. On any failure both the context
 * allocation and the device reference are rolled back.
 *
 * Return: pointer to the new context, or an ERR_PTR on failure
 */
struct tee_context *teedev_open(struct tee_device *teedev)
{
	int rc;
	struct tee_context *ctx;

	/* Fails if the device is going away (desc already cleared) */
	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto err;
	}

	kref_init(&ctx->refcount);
	ctx->teedev = teedev;
	rc = teedev->desc->ops->open(ctx);
	if (rc)
		goto err;

	return ctx;
err:
	/* kfree(NULL) is a no-op, so this also covers the alloc failure path */
	kfree(ctx);
	tee_device_put(teedev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(teedev_open);
  61. void teedev_ctx_get(struct tee_context *ctx)
  62. {
  63. if (ctx->releasing)
  64. return;
  65. kref_get(&ctx->refcount);
  66. }
/* kref release callback: runs when the last context reference is dropped */
static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);

	/*
	 * Mark the context as releasing so that teedev_ctx_get()/put()
	 * become no-ops while the driver's release() hook runs.
	 */
	ctx->releasing = true;
	ctx->teedev->desc->ops->release(ctx);
	kfree(ctx);
}
  75. void teedev_ctx_put(struct tee_context *ctx)
  76. {
  77. if (ctx->releasing)
  78. return;
  79. kref_put(&ctx->refcount, teedev_ctx_release);
  80. }
/**
 * teedev_close_context() - Drop a context and its device usage reference
 * @ctx: Context to close
 */
void teedev_close_context(struct tee_context *ctx)
{
	/*
	 * Cache the device pointer first: teedev_ctx_put() may free @ctx
	 * when the last reference is dropped.
	 */
	struct tee_device *teedev = ctx->teedev;

	teedev_ctx_put(ctx);
	tee_device_put(teedev);
}
EXPORT_SYMBOL_GPL(teedev_close_context);
  88. static int tee_open(struct inode *inode, struct file *filp)
  89. {
  90. struct tee_context *ctx;
  91. ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
  92. if (IS_ERR(ctx))
  93. return PTR_ERR(ctx);
  94. /*
  95. * Default user-space behaviour is to wait for tee-supplicant
  96. * if not present for any requests in this context.
  97. */
  98. ctx->supp_nowait = false;
  99. filp->private_data = ctx;
  100. return 0;
  101. }
  102. static int tee_release(struct inode *inode, struct file *filp)
  103. {
  104. teedev_close_context(filp->private_data);
  105. return 0;
  106. }
/**
 * uuid_v5() - Calculate UUIDv5
 * @uuid: Resulting UUID
 * @ns: Name space ID for UUIDv5 function
 * @name: Name for UUIDv5 function
 * @size: Size of name
 *
 * UUIDv5 is specified in RFC 4122.
 *
 * This implements section (for SHA-1):
 * 4.3. Algorithm for Creating a Name-Based UUID
 *
 * Return: 0 on success or a negative error code from the crypto API
 */
static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
		   size_t size)
{
	unsigned char hash[SHA1_DIGEST_SIZE];
	struct crypto_shash *shash = NULL;
	struct shash_desc *desc = NULL;
	int rc;

	shash = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(shash)) {
		rc = PTR_ERR(shash);
		pr_err("shash(sha1) allocation failed\n");
		return rc;
	}

	/* shash_desc carries a tfm-specific context right after the struct */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
		       GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_shash;
	}

	desc->tfm = shash;

	/* UUIDv5 = SHA-1 over (name space UUID || name), truncated to 128 bits */
	rc = crypto_shash_init(desc);
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_update(desc, (const u8 *)name, size);
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_final(desc, hash);
	if (rc < 0)
		goto out_free_desc;

	memcpy(uuid->b, hash, UUID_SIZE);

	/* Tag for version 5 */
	uuid->b[6] = (hash[6] & 0x0F) | 0x50;
	/* Variant bits per RFC 4122 */
	uuid->b[8] = (hash[8] & 0x3F) | 0x80;

out_free_desc:
	kfree(desc);
out_free_shash:
	crypto_free_shash(shash);
	return rc;
}
/**
 * tee_session_calc_client_uuid() - Calculate client UUID for a session
 * @uuid: Resulting UUID
 * @connection_method: TEE_IOCTL_LOGIN_* connection method for the session
 * @connection_data: Method-specific data; for TEE_IOCTL_LOGIN_GROUP it
 *                   carries the group id in its first sizeof(gid_t) bytes
 *
 * Maps the caller's identity to a UUIDv5 in the TEE client name space so
 * the TEE environment can identify the REE client.
 *
 * Return: 0 on success or < 0 on failure
 */
int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
				 const u8 connection_data[TEE_IOCTL_UUID_LEN])
{
	gid_t ns_grp = (gid_t)-1;
	kgid_t grp = INVALID_GID;
	char *name = NULL;
	int name_len;
	int rc;

	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
	    connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
		/* Nil UUID to be passed to TEE environment */
		uuid_copy(uuid, &uuid_null);
		return 0;
	}

	/*
	 * In Linux environment client UUID is based on UUIDv5.
	 *
	 * Determine client UUID with following semantics for 'name':
	 *
	 * For TEEC_LOGIN_USER:
	 * uid=<uid>
	 *
	 * For TEEC_LOGIN_GROUP:
	 * gid=<gid>
	 *
	 */
	name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	switch (connection_method) {
	case TEE_IOCTL_LOGIN_USER:
		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
				    current_euid().val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	case TEE_IOCTL_LOGIN_GROUP:
		memcpy(&ns_grp, connection_data, sizeof(gid_t));
		/* Reject the request unless the caller is in that group */
		grp = make_kgid(current_user_ns(), ns_grp);
		if (!gid_valid(grp) || !in_egroup_p(grp)) {
			rc = -EPERM;
			goto out_free_name;
		}

		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
				    grp.val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	default:
		rc = -EINVAL;
		goto out_free_name;
	}

	rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
out_free_name:
	kfree(name);
	return rc;
}
EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
  223. static int tee_ioctl_version(struct tee_context *ctx,
  224. struct tee_ioctl_version_data __user *uvers)
  225. {
  226. struct tee_ioctl_version_data vers;
  227. ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
  228. if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
  229. vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
  230. if (copy_to_user(uvers, &vers, sizeof(vers)))
  231. return -EFAULT;
  232. return 0;
  233. }
/* Handle TEE_IOC_SHM_ALLOC: allocate shared memory and return an fd for it */
static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_alloc_user_buf(ctx, data.size);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Report the shm id and the (possibly adjusted) size back */
	data.id = shm->id;
	data.size = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
/*
 * Handle TEE_IOC_SHM_REGISTER: register an existing user buffer as shared
 * memory and return an fd for it.
 */
static int
tee_ioctl_shm_register(struct tee_context *ctx,
		       struct tee_ioctl_shm_register_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_register_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_register_user_buf(ctx, data.addr, data.length);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Report the shm id and the (possibly adjusted) length back */
	data.id = shm->id;
	data.length = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
/*
 * Copy ioctl parameters from user space into kernel struct tee_param
 * entries. Each memref parameter that resolves a shared memory object
 * leaves it with an elevated reference count; the caller must
 * tee_shm_put() every non-NULL shm pointer on all paths, including error
 * returns from this function (params filled so far remain valid).
 */
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_shm *shm;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * If a NULL pointer is passed to a TA in the TEE,
			 * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL
			 * indicating a NULL memory reference.
			 */
			if (ip.c != TEE_MEMREF_NULL) {
				/*
				 * If we fail to get a pointer to a shared
				 * memory object (and increase the ref count)
				 * from an identifier we return an error. All
				 * pointers that has been added in params have
				 * an increased ref count. It's the caller's
				 * responsibility to do tee_shm_put() on all
				 * resolved pointers.
				 */
				shm = tee_shm_get_from_id(ctx, ip.c);
				if (IS_ERR(shm))
					return PTR_ERR(shm);

				/*
				 * Ensure offset + size does not overflow
				 * offset and does not overflow the size of
				 * the referred shared memory object.
				 */
				if ((ip.a + ip.b) < ip.a ||
				    (ip.a + ip.b) > shm->size) {
					tee_shm_put(shm);
					return -EINVAL;
				}
			} else if (ctx->cap_memref_null) {
				/* Pass NULL pointer to OP-TEE */
				shm = NULL;
			} else {
				return -EINVAL;
			}

			params[n].u.memref.shm_offs = ip.a;
			params[n].u.memref.size = ip.b;
			params[n].u.memref.shm = shm;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * Copy out/inout results back to the user-supplied parameter array.
 * For memrefs only the (possibly updated) size is written back; the shm
 * identifier and offset are owned by user space and left untouched.
 */
static int params_to_user(struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param __user *up = uparams + n;
		struct tee_param *p = params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			if (put_user(p->u.value.a, &up->a) ||
			    put_user(p->u.value.b, &up->b) ||
			    put_user(p->u.value.c, &up->c))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (put_user((u64)p->u.memref.size, &up->b))
				return -EFAULT;
			break;
		default:
			/* Input-only and none params carry nothing back */
			break;
		}
	}
	return 0;
}
/* Handle TEE_IOC_OPEN_SESSION: open a session towards a Trusted Application */
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the argument buffer before dereferencing anything in it */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* The buffer must hold the header plus exactly num_params entries */
	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	/* The REE-kernel login range is reserved for in-kernel clients */
	if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
	    arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
		pr_debug("login method not allowed for user-space client\n");
		rc = -EPERM;
		goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded to open the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
/* Handle TEE_IOC_INVOKE: invoke a function in an already open session */
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the argument buffer before dereferencing anything in it */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* The buffer must hold the header plus exactly num_params entries */
	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
  508. static int tee_ioctl_cancel(struct tee_context *ctx,
  509. struct tee_ioctl_cancel_arg __user *uarg)
  510. {
  511. struct tee_ioctl_cancel_arg arg;
  512. if (!ctx->teedev->desc->ops->cancel_req)
  513. return -EINVAL;
  514. if (copy_from_user(&arg, uarg, sizeof(arg)))
  515. return -EFAULT;
  516. return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
  517. arg.session);
  518. }
  519. static int
  520. tee_ioctl_close_session(struct tee_context *ctx,
  521. struct tee_ioctl_close_session_arg __user *uarg)
  522. {
  523. struct tee_ioctl_close_session_arg arg;
  524. if (!ctx->teedev->desc->ops->close_session)
  525. return -EINVAL;
  526. if (copy_from_user(&arg, uarg, sizeof(arg)))
  527. return -EFAULT;
  528. return ctx->teedev->desc->ops->close_session(ctx, arg.session);
  529. }
/*
 * Copy a supplicant request's parameters out to the tee-supplicant's
 * user-space buffer. Memrefs are exported as (offset, size, shm id);
 * a missing shm is flagged with an invalid id so the supplicant can
 * tell it apart from a real reference.
 */
static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}
	return 0;
}
/*
 * Handle TEE_IOC_SUPPL_RECV: tee-supplicant waits for a request from the
 * TEE and receives its function id and parameters.
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the argument buffer before dereferencing anything in it */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/* The buffer must hold the header plus exactly num_params entries */
	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	/* Blocks until a request arrives; may shrink num_params */
	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
/*
 * Copy the tee-supplicant's response parameters from user space. Unlike
 * params_from_user() this never resolves shm objects: for memrefs only
 * the updated size is taken, everything else is zeroed.
 */
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}
/*
 * Handle TEE_IOC_SUPPL_SEND: tee-supplicant sends the response to a
 * previously received request back to the TEE.
 */
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	/* Bound the argument buffer before dereferencing anything in it */
	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/*
	 * Unlike the other handlers this only requires the buffer to be
	 * at least large enough for header + num_params entries.
	 */
	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
  684. static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  685. {
  686. struct tee_context *ctx = filp->private_data;
  687. void __user *uarg = (void __user *)arg;
  688. switch (cmd) {
  689. case TEE_IOC_VERSION:
  690. return tee_ioctl_version(ctx, uarg);
  691. case TEE_IOC_SHM_ALLOC:
  692. return tee_ioctl_shm_alloc(ctx, uarg);
  693. case TEE_IOC_SHM_REGISTER:
  694. return tee_ioctl_shm_register(ctx, uarg);
  695. case TEE_IOC_OPEN_SESSION:
  696. return tee_ioctl_open_session(ctx, uarg);
  697. case TEE_IOC_INVOKE:
  698. return tee_ioctl_invoke(ctx, uarg);
  699. case TEE_IOC_CANCEL:
  700. return tee_ioctl_cancel(ctx, uarg);
  701. case TEE_IOC_CLOSE_SESSION:
  702. return tee_ioctl_close_session(ctx, uarg);
  703. case TEE_IOC_SUPPL_RECV:
  704. return tee_ioctl_supp_recv(ctx, uarg);
  705. case TEE_IOC_SUPPL_SEND:
  706. return tee_ioctl_supp_send(ctx, uarg);
  707. default:
  708. return -EINVAL;
  709. }
  710. }
/* File operations for the TEE character devices */
static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
/* Device release callback: final teardown when the struct device ref drops */
static void tee_release_device(struct device *dev)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	/* Return the device id to the pool under the global lock */
	spin_lock(&driver_lock);
	clear_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);
	mutex_destroy(&teedev->mutex);
	idr_destroy(&teedev->idr);
	kfree(teedev);
}
/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc: Descriptor for this driver
 * @dev: Parent device for this device
 * @pool: Shared memory pool, NULL if not used
 * @driver_data: Private driver data for this device
 *
 * Allocates a new struct tee_device instance. The device is
 * removed by tee_device_unregister().
 *
 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
 */
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
				    struct device *dev,
				    struct tee_shm_pool *pool,
				    void *driver_data)
{
	struct tee_device *teedev;
	void *ret;
	int rc, max_id;
	int offs = 0;

	/* The descriptor must provide the mandatory ops */
	if (!teedesc || !teedesc->name || !teedesc->ops ||
	    !teedesc->ops->get_version || !teedesc->ops->open ||
	    !teedesc->ops->release || !pool)
		return ERR_PTR(-EINVAL);

	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
	if (!teedev) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	/*
	 * Unprivileged devices draw ids from the lower half of the bitmap,
	 * privileged devices from the upper half.
	 */
	max_id = TEE_NUM_DEVICES / 2;

	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
		offs = TEE_NUM_DEVICES / 2;
		max_id = TEE_NUM_DEVICES;
	}

	spin_lock(&driver_lock);
	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
	if (teedev->id < max_id)
		set_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);

	if (teedev->id >= max_id) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
		 teedev->id - offs);

	teedev->dev.class = &tee_class;
	teedev->dev.release = tee_release_device;
	teedev->dev.parent = dev;

	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);

	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_devt;
	}

	cdev_init(&teedev->cdev, &tee_fops);
	teedev->cdev.owner = teedesc->owner;

	dev_set_drvdata(&teedev->dev, driver_data);
	device_initialize(&teedev->dev);

	/* 1 as tee_device_unregister() does one final tee_device_put() */
	teedev->num_users = 1;
	init_completion(&teedev->c_no_users);
	mutex_init(&teedev->mutex);
	idr_init(&teedev->idr);

	teedev->desc = teedesc;
	teedev->pool = pool;

	return teedev;
err_devt:
	/*
	 * NOTE(review): tee_devt is allocated once globally (not visible
	 * here); verify that unregistering a single minor on this error
	 * path is intended rather than a leftover.
	 */
	unregister_chrdev_region(teedev->dev.devt, 1);
err:
	pr_err("could not register %s driver\n",
	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
	/* Only ids < TEE_NUM_DEVICES were actually claimed in dev_mask */
	if (teedev && teedev->id < TEE_NUM_DEVICES) {
		spin_lock(&driver_lock);
		clear_bit(teedev->id, dev_mask);
		spin_unlock(&driver_lock);
	}
	kfree(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
/**
 * tee_device_set_dev_groups() - Set driver-specific sysfs attribute groups
 * @teedev: Device whose sysfs groups are set
 * @dev_groups: NULL-terminated array of attribute groups
 */
void tee_device_set_dev_groups(struct tee_device *teedev,
			       const struct attribute_group **dev_groups)
{
	teedev->dev.groups = dev_groups;
}
EXPORT_SYMBOL_GPL(tee_device_set_dev_groups);
  816. static ssize_t implementation_id_show(struct device *dev,
  817. struct device_attribute *attr, char *buf)
  818. {
  819. struct tee_device *teedev = container_of(dev, struct tee_device, dev);
  820. struct tee_ioctl_version_data vers;
  821. teedev->desc->ops->get_version(teedev, &vers);
  822. return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
  823. }
  824. static DEVICE_ATTR_RO(implementation_id);
/* Default sysfs attributes present on every TEE device */
static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

ATTRIBUTE_GROUPS(tee_dev);
/* Device class shared by all TEE devices (/sys/class/tee) */
static const struct class tee_class = {
	.name = "tee",
	.dev_groups = tee_dev_groups,
};
  834. /**
  835. * tee_device_register() - Registers a TEE device
  836. * @teedev: Device to register
  837. *
  838. * tee_device_unregister() need to be called to remove the @teedev if
  839. * this function fails.
  840. *
  841. * @returns < 0 on failure
  842. */
  843. int tee_device_register(struct tee_device *teedev)
  844. {
  845. int rc;
  846. if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
  847. dev_err(&teedev->dev, "attempt to register twice\n");
  848. return -EINVAL;
  849. }
  850. rc = cdev_device_add(&teedev->cdev, &teedev->dev);
  851. if (rc) {
  852. dev_err(&teedev->dev,
  853. "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
  854. teedev->name, MAJOR(teedev->dev.devt),
  855. MINOR(teedev->dev.devt), rc);
  856. return rc;
  857. }
  858. teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
  859. return 0;
  860. }
  861. EXPORT_SYMBOL_GPL(tee_device_register);
/**
 * tee_device_put() - Drop a reference to a TEE device
 * @teedev:	Device to put
 *
 * When the last user is gone, @teedev->desc is cleared (making further
 * tee_device_get() calls fail) and c_no_users is completed so that a
 * waiter in tee_device_unregister() can proceed.
 */
void tee_device_put(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	/* Shouldn't put in this state */
	if (!WARN_ON(!teedev->desc)) {
		teedev->num_users--;
		if (!teedev->num_users) {
			/*
			 * desc must be cleared before completing c_no_users;
			 * tee_device_unregister() relies on this ordering to
			 * skip taking the mutex after the wait.
			 */
			teedev->desc = NULL;
			complete(&teedev->c_no_users);
		}
	}
	mutex_unlock(&teedev->mutex);
}
  875. bool tee_device_get(struct tee_device *teedev)
  876. {
  877. mutex_lock(&teedev->mutex);
  878. if (!teedev->desc) {
  879. mutex_unlock(&teedev->mutex);
  880. return false;
  881. }
  882. teedev->num_users++;
  883. mutex_unlock(&teedev->mutex);
  884. return true;
  885. }
/**
 * tee_device_unregister() - Removes a TEE device
 * @teedev: Device to unregister
 *
 * This function should be called to remove the @teedev even if
 * tee_device_register() hasn't been called yet. Does nothing if
 * @teedev is NULL.
 */
void tee_device_unregister(struct tee_device *teedev)
{
	if (!teedev)
		return;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
		cdev_device_del(&teedev->cdev, &teedev->dev);

	/*
	 * Drop the initial reference taken in tee_device_alloc()
	 * (num_users starts at 1), then wait until every remaining
	 * user has dropped theirs.
	 */
	tee_device_put(teedev);
	wait_for_completion(&teedev->c_no_users);

	/*
	 * No need to take a mutex any longer now since teedev->desc was
	 * set to NULL before teedev->c_no_users was completed.
	 */

	teedev->pool = NULL;

	put_device(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_device_unregister);
/**
 * tee_get_drvdata() - Return driver_data pointer
 * @teedev:	Device containing the driver_data pointer
 *
 * Convenience accessor for the driver_data stored in the embedded
 * struct device.
 *
 * @returns the driver_data pointer supplied to tee_device_alloc().
 */
void *tee_get_drvdata(struct tee_device *teedev)
{
	return dev_get_drvdata(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_get_drvdata);
/* Per-search context handed to match_dev() through class_find_device() */
struct match_dev_data {
	struct tee_ioctl_version_data *vers;	/* filled by get_version() for each candidate */
	const void *data;			/* opaque cookie forwarded to @match */
	int (*match)(struct tee_ioctl_version_data *, const void *);
};
  925. static int match_dev(struct device *dev, const void *data)
  926. {
  927. const struct match_dev_data *match_data = data;
  928. struct tee_device *teedev = container_of(dev, struct tee_device, dev);
  929. teedev->desc->ops->get_version(teedev, match_data->vers);
  930. return match_data->match(match_data->vers, match_data->data);
  931. }
/**
 * tee_client_open_context() - Open a TEE context matching given criteria
 * @start:	If not NULL, continue searching after this context's device
 * @match:	Predicate deciding whether a device's version data matches
 * @data:	Opaque cookie passed to @match
 * @vers:	If not NULL, receives the version data of the matched device
 *
 * Walks all devices in the tee class, opening a context on the first
 * matching device. On any open failure other than -ENOMEM the search
 * continues with the next matching device.
 *
 * @returns an opened context, or ERR_PTR(-ENOENT) when no device matched,
 * or another ERR_PTR from teedev_open().
 */
struct tee_context *
tee_client_open_context(struct tee_context *start,
			int (*match)(struct tee_ioctl_version_data *,
				     const void *),
			const void *data, struct tee_ioctl_version_data *vers)
{
	struct device *dev = NULL;
	struct device *put_dev = NULL;
	struct tee_context *ctx = NULL;
	struct tee_ioctl_version_data v;
	struct match_dev_data match_data = { vers ? vers : &v, data, match };

	if (start)
		dev = &start->teedev->dev;

	do {
		/* class_find_device() returns a referenced device (or NULL) */
		dev = class_find_device(&tee_class, dev, &match_data, match_dev);
		if (!dev) {
			ctx = ERR_PTR(-ENOENT);
			break;
		}

		/* Release the previous candidate's reference, keep this one */
		put_device(put_dev);
		put_dev = dev;

		ctx = teedev_open(container_of(dev, struct tee_device, dev));
	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);

	/* Drop the reference on the last candidate (if any) */
	put_device(put_dev);

	/*
	 * Default behaviour for in kernel client is to not wait for
	 * tee-supplicant if not present for any requests in this context.
	 * Also this flag could be configured again before call to
	 * tee_client_open_session() if any in kernel client requires
	 * different behaviour.
	 */
	if (!IS_ERR(ctx))
		ctx->supp_nowait = true;

	return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
/**
 * tee_client_close_context() - Close a TEE context
 * @ctx:	Context to close, opened with tee_client_open_context()
 */
void tee_client_close_context(struct tee_context *ctx)
{
	teedev_close_context(ctx);
}
EXPORT_SYMBOL_GPL(tee_client_close_context);
/**
 * tee_client_get_version() - Query version data of the context's device
 * @ctx:	TEE context
 * @vers:	Filled in by the driver's get_version() hook
 */
void tee_client_get_version(struct tee_context *ctx,
			    struct tee_ioctl_version_data *vers)
{
	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
}
EXPORT_SYMBOL_GPL(tee_client_get_version);
  979. int tee_client_open_session(struct tee_context *ctx,
  980. struct tee_ioctl_open_session_arg *arg,
  981. struct tee_param *param)
  982. {
  983. if (!ctx->teedev->desc->ops->open_session)
  984. return -EINVAL;
  985. return ctx->teedev->desc->ops->open_session(ctx, arg, param);
  986. }
  987. EXPORT_SYMBOL_GPL(tee_client_open_session);
  988. int tee_client_close_session(struct tee_context *ctx, u32 session)
  989. {
  990. if (!ctx->teedev->desc->ops->close_session)
  991. return -EINVAL;
  992. return ctx->teedev->desc->ops->close_session(ctx, session);
  993. }
  994. EXPORT_SYMBOL_GPL(tee_client_close_session);
  995. int tee_client_system_session(struct tee_context *ctx, u32 session)
  996. {
  997. if (!ctx->teedev->desc->ops->system_session)
  998. return -EINVAL;
  999. return ctx->teedev->desc->ops->system_session(ctx, session);
  1000. }
  1001. EXPORT_SYMBOL_GPL(tee_client_system_session);
  1002. int tee_client_invoke_func(struct tee_context *ctx,
  1003. struct tee_ioctl_invoke_arg *arg,
  1004. struct tee_param *param)
  1005. {
  1006. if (!ctx->teedev->desc->ops->invoke_func)
  1007. return -EINVAL;
  1008. return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
  1009. }
  1010. EXPORT_SYMBOL_GPL(tee_client_invoke_func);
  1011. int tee_client_cancel_req(struct tee_context *ctx,
  1012. struct tee_ioctl_cancel_arg *arg)
  1013. {
  1014. if (!ctx->teedev->desc->ops->cancel_req)
  1015. return -EINVAL;
  1016. return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
  1017. arg->session);
  1018. }
  1019. static int tee_client_device_match(struct device *dev,
  1020. const struct device_driver *drv)
  1021. {
  1022. const struct tee_client_device_id *id_table;
  1023. struct tee_client_device *tee_device;
  1024. id_table = to_tee_client_driver(drv)->id_table;
  1025. tee_device = to_tee_client_device(dev);
  1026. while (!uuid_is_null(&id_table->uuid)) {
  1027. if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
  1028. return 1;
  1029. id_table++;
  1030. }
  1031. return 0;
  1032. }
/* Uevent callback: exposes the device UUID as MODALIAS=tee:<uuid> */
static int tee_client_device_uevent(const struct device *dev,
				    struct kobj_uevent_env *env)
{
	uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;

	return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
}
/* Bus type for TEE client devices/drivers registered on the "tee" bus */
const struct bus_type tee_bus_type = {
	.name		= "tee",
	.match		= tee_client_device_match,
	.uevent		= tee_client_device_uevent,
};
EXPORT_SYMBOL_GPL(tee_bus_type);
/*
 * Subsystem init: register the tee class, reserve the char device
 * region (TEE_NUM_DEVICES minors) and register the tee bus, unwinding
 * in reverse order on failure.
 */
static int __init tee_init(void)
{
	int rc;

	rc = class_register(&tee_class);
	if (rc) {
		pr_err("couldn't create class\n");
		return rc;
	}

	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		goto out_unreg_class;
	}

	rc = bus_register(&tee_bus_type);
	if (rc) {
		pr_err("failed to register tee bus\n");
		goto out_unreg_chrdev;
	}

	return 0;

out_unreg_chrdev:
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
out_unreg_class:
	class_unregister(&tee_class);
	return rc;
}
/* Module exit: tear down in the reverse order of tee_init() */
static void __exit tee_exit(void)
{
	bus_unregister(&tee_bus_type);
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
	class_unregister(&tee_class);
}
/*
 * NOTE(review): subsys_initcall (rather than module_init) registers the
 * class/bus early — presumably so built-in TEE drivers can rely on them
 * during their own initcalls.
 */
subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");