/* crypto/af_alg.c */
  1. /*
  2. * af_alg: User-space algorithm interface
  3. *
  4. * This file provides the user-space API for algorithms.
  5. *
  6. * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 2 of the License, or (at your option)
  11. * any later version.
  12. *
  13. */
  14. #include <linux/atomic.h>
  15. #include <crypto/if_alg.h>
  16. #include <linux/crypto.h>
  17. #include <linux/init.h>
  18. #include <linux/kernel.h>
  19. #include <linux/list.h>
  20. #include <linux/module.h>
  21. #include <linux/net.h>
  22. #include <linux/rwsem.h>
  23. #include <linux/sched/signal.h>
  24. #include <linux/security.h>
/* Registry entry linking one registered af_alg_type into alg_types. */
struct alg_type_list {
	const struct af_alg_type *type;	/* the registered algorithm type */
	struct list_head list;		/* linkage in the alg_types list */
};
/* Memory accounting shared by all AF_ALG sockets. */
static atomic_long_t alg_memory_allocated;

static struct proto alg_proto = {
	.name			= "ALG",
	.owner			= THIS_MODULE,
	.memory_allocated	= &alg_memory_allocated,
	.obj_size		= sizeof(struct alg_sock),
};

/* Registered algorithm types; protected by alg_types_sem. */
static LIST_HEAD(alg_types);
static DECLARE_RWSEM(alg_types_sem);
  38. static const struct af_alg_type *alg_get_type(const char *name)
  39. {
  40. const struct af_alg_type *type = ERR_PTR(-ENOENT);
  41. struct alg_type_list *node;
  42. down_read(&alg_types_sem);
  43. list_for_each_entry(node, &alg_types, list) {
  44. if (strcmp(node->type->name, name))
  45. continue;
  46. if (try_module_get(node->type->owner))
  47. type = node->type;
  48. break;
  49. }
  50. up_read(&alg_types_sem);
  51. return type;
  52. }
  53. int af_alg_register_type(const struct af_alg_type *type)
  54. {
  55. struct alg_type_list *node;
  56. int err = -EEXIST;
  57. down_write(&alg_types_sem);
  58. list_for_each_entry(node, &alg_types, list) {
  59. if (!strcmp(node->type->name, type->name))
  60. goto unlock;
  61. }
  62. node = kmalloc(sizeof(*node), GFP_KERNEL);
  63. err = -ENOMEM;
  64. if (!node)
  65. goto unlock;
  66. type->ops->owner = THIS_MODULE;
  67. if (type->ops_nokey)
  68. type->ops_nokey->owner = THIS_MODULE;
  69. node->type = type;
  70. list_add(&node->list, &alg_types);
  71. err = 0;
  72. unlock:
  73. up_write(&alg_types_sem);
  74. return err;
  75. }
  76. EXPORT_SYMBOL_GPL(af_alg_register_type);
  77. int af_alg_unregister_type(const struct af_alg_type *type)
  78. {
  79. struct alg_type_list *node;
  80. int err = -ENOENT;
  81. down_write(&alg_types_sem);
  82. list_for_each_entry(node, &alg_types, list) {
  83. if (strcmp(node->type->name, type->name))
  84. continue;
  85. list_del(&node->list);
  86. kfree(node);
  87. err = 0;
  88. break;
  89. }
  90. up_write(&alg_types_sem);
  91. return err;
  92. }
  93. EXPORT_SYMBOL_GPL(af_alg_unregister_type);
  94. static void alg_do_release(const struct af_alg_type *type, void *private)
  95. {
  96. if (!type)
  97. return;
  98. type->release(private);
  99. module_put(type->owner);
  100. }
  101. int af_alg_release(struct socket *sock)
  102. {
  103. if (sock->sk) {
  104. sock_put(sock->sk);
  105. sock->sk = NULL;
  106. }
  107. return 0;
  108. }
  109. EXPORT_SYMBOL_GPL(af_alg_release);
/**
 * af_alg_release_parent - drop a child socket's references on its parent
 * @sk: child (accept()ed) socket
 *
 * The child's nokey_refcnt is sampled before @sk is switched to the
 * parent, so the parent's nokey count is only decremented when this
 * child actually held a nokey reference.  The parent socket is released
 * once its refcnt drops to zero.
 */
void af_alg_release_parent(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	/* snapshot whether this child counted towards the nokey refs */
	unsigned int nokey = atomic_read(&ask->nokey_refcnt);

	sk = ask->parent;
	ask = alg_sk(sk);

	if (nokey)
		atomic_dec(&ask->nokey_refcnt);

	if (atomic_dec_and_test(&ask->refcnt))
		sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
/*
 * alg_bind - bind an AF_ALG socket to an algorithm
 *
 * Parses the sockaddr_alg supplied by user space, resolves the
 * requested algorithm type (auto-loading an "algif-<type>" module on
 * demand) and instantiates it.  Fails with -EBUSY when the socket
 * already has accepted children (refcnt != 0).
 */
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sockaddr_alg_new *sa = (void *)uaddr;
	const struct af_alg_type *type;
	void *private;
	int err;

	if (sock->state == SS_CONNECTED)
		return -EINVAL;

	/* salg_name must sit at the same offset in both sockaddr layouts */
	BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
		     offsetof(struct sockaddr_alg, salg_name));
	BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));

	/* need at least one byte of algorithm name */
	if (addr_len < sizeof(*sa) + 1)
		return -EINVAL;

	/* If caller uses non-allowed flag, return error. */
	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
		return -EINVAL;

	/* force NUL termination of both user-controlled strings */
	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
	sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;

	type = alg_get_type(sa->salg_type);
	if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
		/* try to auto-load the handler module, then retry */
		request_module("algif-%s", sa->salg_type);
		type = alg_get_type(sa->salg_type);
	}

	if (IS_ERR(type))
		return PTR_ERR(type);

	private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
	if (IS_ERR(private)) {
		module_put(type->owner);
		return PTR_ERR(private);
	}

	err = -EBUSY;
	lock_sock(sk);
	if (atomic_read(&ask->refcnt))
		goto unlock;

	/* install the new binding; the old one is released below */
	swap(ask->type, type);
	swap(ask->private, private);

	err = 0;

unlock:
	release_sock(sk);

	/* releases the previous binding, or the new one on -EBUSY */
	alg_do_release(type, private);

	return err;
}
  167. static int alg_setkey(struct sock *sk, char __user *ukey,
  168. unsigned int keylen)
  169. {
  170. struct alg_sock *ask = alg_sk(sk);
  171. const struct af_alg_type *type = ask->type;
  172. u8 *key;
  173. int err;
  174. key = sock_kmalloc(sk, keylen, GFP_KERNEL);
  175. if (!key)
  176. return -ENOMEM;
  177. err = -EFAULT;
  178. if (copy_from_user(key, ukey, keylen))
  179. goto out;
  180. err = type->setkey(ask->private, key, keylen);
  181. out:
  182. sock_kzfree_s(sk, key, keylen);
  183. return err;
  184. }
/*
 * alg_setsockopt - handle SOL_ALG options (ALG_SET_KEY,
 * ALG_SET_AEAD_AUTHSIZE) on the parent socket.
 *
 * Refuses with -EBUSY while accepted children other than nokey ones
 * exist (refcnt != nokey_refcnt).
 */
static int alg_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	int err = -EBUSY;

	lock_sock(sk);
	/* only nokey children may exist while changing key/authsize */
	if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
		goto unlock;

	type = ask->type;

	err = -ENOPROTOOPT;
	if (level != SOL_ALG || !type)
		goto unlock;

	switch (optname) {
	case ALG_SET_KEY:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setkey)
			goto unlock;

		err = alg_setkey(sk, optval, optlen);
		break;
	case ALG_SET_AEAD_AUTHSIZE:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setauthsize)
			goto unlock;
		/* optlen itself carries the authsize value */
		err = type->setauthsize(ask->private, optlen);
	}

unlock:
	release_sock(sk);

	return err;
}
/**
 * af_alg_accept - create a child socket for the actual crypto operation
 * @sk: bound parent socket
 * @newsock: socket structure to attach the child to
 * @kern: whether the child is created by the kernel
 *
 * Allocates a new sock, clones the parent's security context and lets
 * the algorithm type set up its per-operation state.  If the type
 * reports -ENOKEY and provides nokey ops, a restricted "nokey" child
 * is created instead.  The first child takes a reference on the parent
 * socket, dropped again via af_alg_release_parent().
 */
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
{
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	struct sock *sk2;
	unsigned int nokey;
	int err;

	lock_sock(sk);
	type = ask->type;

	err = -EINVAL;
	if (!type)
		goto unlock;

	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
	err = -ENOMEM;
	if (!sk2)
		goto unlock;

	sock_init_data(newsock, sk2);
	security_sock_graft(sk2, newsock);
	security_sk_clone(sk, sk2);

	err = type->accept(ask->private, sk2);

	/* fall back to the keyless ops when no key has been set yet */
	nokey = err == -ENOKEY;
	if (nokey && type->accept_nokey)
		err = type->accept_nokey(ask->private, sk2);

	if (err)
		goto unlock;

	sk2->sk_family = PF_ALG;

	/* the first accepted child pins the parent socket */
	if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
		sock_hold(sk);
	if (nokey) {
		atomic_inc(&ask->nokey_refcnt);
		atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
	}
	alg_sk(sk2)->parent = sk;
	alg_sk(sk2)->type = type;

	newsock->ops = type->ops;
	newsock->state = SS_CONNECTED;

	/* nokey children get the restricted ops set */
	if (nokey)
		newsock->ops = type->ops_nokey;

	err = 0;

unlock:
	release_sock(sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_accept);
/* proto_ops accept hook: delegates to af_alg_accept; flags are unused. */
static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
		      bool kern)
{
	return af_alg_accept(sock->sk, newsock, kern);
}
/*
 * Operations of the parent (bound) socket.  Data transfer entry points
 * are stubbed out; actual crypto I/O happens on the child sockets
 * created via accept(), which install the type's own proto_ops.
 */
static const struct proto_ops alg_proto_ops = {
	.family		=	PF_ALG,
	.owner		=	THIS_MODULE,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.sendpage	=	sock_no_sendpage,
	.sendmsg	=	sock_no_sendmsg,
	.recvmsg	=	sock_no_recvmsg,

	.bind		=	alg_bind,
	.release	=	af_alg_release,
	.setsockopt	=	alg_setsockopt,
	.accept		=	alg_accept,
};
  286. static void alg_sock_destruct(struct sock *sk)
  287. {
  288. struct alg_sock *ask = alg_sk(sk);
  289. alg_do_release(ask->type, ask->private);
  290. }
  291. static int alg_create(struct net *net, struct socket *sock, int protocol,
  292. int kern)
  293. {
  294. struct sock *sk;
  295. int err;
  296. if (sock->type != SOCK_SEQPACKET)
  297. return -ESOCKTNOSUPPORT;
  298. if (protocol != 0)
  299. return -EPROTONOSUPPORT;
  300. err = -ENOMEM;
  301. sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
  302. if (!sk)
  303. goto out;
  304. sock->ops = &alg_proto_ops;
  305. sock_init_data(sock, sk);
  306. sk->sk_family = PF_ALG;
  307. sk->sk_destruct = alg_sock_destruct;
  308. return 0;
  309. out:
  310. return err;
  311. }
/* PF_ALG socket family registration. */
static const struct net_proto_family alg_family = {
	.family	=	PF_ALG,
	.create	=	alg_create,
	.owner	=	THIS_MODULE,
};
/**
 * af_alg_make_sg - pin user pages from an iov_iter into a scatterlist
 * @sgl: SG structure to fill (at most ALG_MAX_PAGES pages)
 * @iter: user-space buffers to pin
 * @len: maximum number of bytes to map
 *
 * Returns the number of bytes mapped, or a negative error code.
 */
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
{
	size_t off;
	ssize_t n;
	int npages, i;

	n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
	if (n < 0)
		return n;

	/* number of pages covering [off, off + n) */
	npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (WARN_ON(npages == 0))
		return -EINVAL;

	/* Add one extra for linking */
	sg_init_table(sgl->sg, npages + 1);

	for (i = 0, len = n; i < npages; i++) {
		int plen = min_t(int, len, PAGE_SIZE - off);

		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);

		off = 0;	/* only the first page has a non-zero offset */
		len -= plen;
	}
	sg_mark_end(sgl->sg + npages - 1);
	sgl->npages = npages;

	return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);
/**
 * af_alg_link_sg - chain two SGLs produced by af_alg_make_sg
 * @sgl_prev: SGL to append to; its extra (linking) entry is consumed
 * @sgl_new: SGL chained after @sgl_prev
 */
void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
{
	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
}
EXPORT_SYMBOL_GPL(af_alg_link_sg);
  347. void af_alg_free_sg(struct af_alg_sgl *sgl)
  348. {
  349. int i;
  350. for (i = 0; i < sgl->npages; i++)
  351. put_page(sgl->pages[i]);
  352. }
  353. EXPORT_SYMBOL_GPL(af_alg_free_sg);
/**
 * af_alg_cmsg_send - parse SOL_ALG control messages
 * @msg: message from user space
 * @con: receives the parsed control data (IV, operation, AEAD assoclen)
 *
 * Returns 0 on success, -EINVAL on malformed control data.  Note that
 * con->iv points directly into the cmsg buffer.
 */
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_ALG)
			continue;

		switch (cmsg->cmsg_type) {
		case ALG_SET_IV:
			/* validate header first, then header + ivlen bytes */
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
				return -EINVAL;
			con->iv = (void *)CMSG_DATA(cmsg);
			if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
						      sizeof(*con->iv)))
				return -EINVAL;
			break;

		case ALG_SET_OP:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->op = *(u32 *)CMSG_DATA(cmsg);
			break;

		case ALG_SET_AEAD_ASSOCLEN:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
/**
 * af_alg_alloc_tsgl - allocate the TX SGL
 *
 * @sk socket of connection to user space
 * @return: 0 upon success, < 0 upon error
 *
 * Appends a fresh af_alg_tsgl (MAX_SGL_ENTS entries plus one chaining
 * slot) to ctx->tsgl_list when the list is empty or its last element
 * is full, chaining it to the previous SGL via the extra slot.
 */
int af_alg_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg = NULL;

	/* this pointer is only dereferenced when the list is non-empty */
	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk,
				   struct_size(sgl, sg, (MAX_SGL_ENTS + 1)),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		/* chain the previous SGL's extra slot to the new one */
		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
/**
 * af_alg_count_tsgl - Count number of TX SG entries
 *
 * The counting starts from the beginning of the SGL to @bytes. If
 * an offset is provided, the counting of the SG entries starts at the offset.
 *
 * @sk socket of connection to user space
 * @bytes Count the number of SG entries holding given number of bytes.
 * @offset Start the counting of SG entries from the given offset.
 * @return Number of TX SG entries found given the constraints
 */
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl, *tmp;
	unsigned int i;
	unsigned int sgl_count = 0;

	if (!bytes)
		return 0;

	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
		struct scatterlist *sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t bytes_count;

			/* Skip offset */
			if (offset >= sg[i].length) {
				offset -= sg[i].length;
				bytes -= sg[i].length;
				continue;
			}

			/* bytes contributed by this (possibly partial) entry */
			bytes_count = sg[i].length - offset;

			offset = 0;
			sgl_count++;

			/* If we have seen requested number of bytes, stop */
			if (bytes_count >= bytes)
				return sgl_count;

			bytes -= bytes_count;
		}
	}

	return sgl_count;
}
EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
  460. /**
  461. * aead_pull_tsgl - Release the specified buffers from TX SGL
  462. *
  463. * If @dst is non-null, reassign the pages to dst. The caller must release
  464. * the pages. If @dst_offset is given only reassign the pages to @dst starting
  465. * at the @dst_offset (byte). The caller must ensure that @dst is large
  466. * enough (e.g. by using af_alg_count_tsgl with the same offset).
  467. *
  468. * @sk socket of connection to user space
  469. * @used Number of bytes to pull from TX SGL
  470. * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
  471. * caller must release the buffers in dst.
  472. * @dst_offset Reassign the TX SGL from given offset. All buffers before
  473. * reaching the offset is released.
  474. */
  475. void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
  476. size_t dst_offset)
  477. {
  478. struct alg_sock *ask = alg_sk(sk);
  479. struct af_alg_ctx *ctx = ask->private;
  480. struct af_alg_tsgl *sgl;
  481. struct scatterlist *sg;
  482. unsigned int i, j = 0;
  483. while (!list_empty(&ctx->tsgl_list)) {
  484. sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
  485. list);
  486. sg = sgl->sg;
  487. for (i = 0; i < sgl->cur; i++) {
  488. size_t plen = min_t(size_t, used, sg[i].length);
  489. struct page *page = sg_page(sg + i);
  490. if (!page)
  491. continue;
  492. /*
  493. * Assumption: caller created af_alg_count_tsgl(len)
  494. * SG entries in dst.
  495. */
  496. if (dst) {
  497. if (dst_offset >= plen) {
  498. /* discard page before offset */
  499. dst_offset -= plen;
  500. } else {
  501. /* reassign page to dst after offset */
  502. get_page(page);
  503. sg_set_page(dst + j, page,
  504. plen - dst_offset,
  505. sg[i].offset + dst_offset);
  506. dst_offset = 0;
  507. j++;
  508. }
  509. }
  510. sg[i].length -= plen;
  511. sg[i].offset += plen;
  512. used -= plen;
  513. ctx->used -= plen;
  514. if (sg[i].length)
  515. return;
  516. put_page(page);
  517. sg_assign_page(sg + i, NULL);
  518. }
  519. list_del(&sgl->list);
  520. sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
  521. (MAX_SGL_ENTS + 1));
  522. }
  523. if (!ctx->used)
  524. ctx->merge = 0;
  525. }
  526. EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
/**
 * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
 *
 * @areq Request holding the TX and RX SGL
 */
void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	/* release the RX SGLs and undo the rcvused accounting */
	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		/* first_rsgl is embedded in areq and must not be freed */
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	tsgl = areq->tsgl;
	if (tsgl) {
		for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
			if (!sg_page(sg))
				continue;
			put_page(sg_page(sg));
		}

		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
	}
}
EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
/**
 * af_alg_wait_for_wmem - wait for availability of writable memory
 *
 * @sk socket of connection to user space
 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
 * @return 0 when writable memory is available, < 0 upon error
 */
int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		/* interruptible wait: bail out on a pending signal */
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);
/**
 * af_alg_wmem_wakeup - wakeup caller when writable memory is available
 *
 * @sk socket of connection to user space
 *
 * NOTE(review): the wakeup uses EPOLLIN-style flags although it signals
 * writable memory — confirm against the poll users before changing.
 */
void af_alg_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!af_alg_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
/**
 * af_alg_wait_for_data - wait for availability of TX data
 *
 * @sk socket of connection to user space
 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
 * @return 0 when TX data is available, < 0 upon error
 */
int af_alg_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		/* wake once data is queued or the sender signalled the end */
		if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
				  &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
/**
 * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
 *
 * @sk socket of connection to user space
 */
void af_alg_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct socket_wq *wq;

	/* nothing queued yet, so nobody can be waiting on it */
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_data_wakeup);
/**
 * af_alg_sendmsg - implementation of sendmsg system call handler
 *
 * The sendmsg system call handler obtains the user data and stores it
 * in ctx->tsgl_list. This implies allocation of the required numbers of
 * struct af_alg_tsgl.
 *
 * In addition, the ctx is filled with the information sent via CMSG.
 *
 * @sock socket of connection to user space
 * @msg message from user space
 * @size size of message from user space
 * @ivsize the size of the IV for the cipher operation to verify that the
 *	   user-space-provided IV has the right size
 * @return the number of copied data upon success, < 0 upon error
 */
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
		   unsigned int ivsize)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = 0;

	/* parse IV / operation / assoclen from the control messages */
	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		/* the user-space IV must match the cipher's IV size */
		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	/* previous operation's data still pending: reject new data */
	if (!ctx->more && ctx->used) {
		err = -EINVAL;
		goto unlock;
	}

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		struct scatterlist *sg;
		size_t len = size;
		size_t plen;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl_list.prev,
					 struct af_alg_tsgl, list);
			sg = sgl->sg + sgl->cur - 1;
			/* fill only up to the end of the last page */
			len = min_t(size_t, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* zero once the page is full: stop merging into it */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!af_alg_writable(sk)) {
			err = af_alg_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, len, af_alg_sndbuf(sk));

		err = af_alg_alloc_tsgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
				 list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);

		/* copy page-sized chunks until len is consumed or SGL full */
		do {
			unsigned int i = sgl->cur;

			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			if (!sg_page(sg + i)) {
				err = -ENOMEM;
				goto unlock;
			}

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		/* a partially filled last page may be merged into next time */
		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	af_alg_data_wakeup(sk);
	release_sock(sk);

	/* report partial progress even when an error occurred */
	return copied ?: err;
}
EXPORT_SYMBOL_GPL(af_alg_sendmsg);
/**
 * af_alg_sendpage - sendpage system call handler
 *
 * This is a generic implementation of sendpage to fill ctx->tsgl_list.
 * The page is referenced and appended as a whole SG entry; no data is
 * copied.
 */
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
			int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	int err = -EINVAL;

	/* more sendpage calls follow: treat like MSG_MORE */
	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* previous operation's data still pending: reject new data */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!af_alg_writable(sk)) {
		err = af_alg_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = af_alg_alloc_tsgl(sk);
	if (err)
		goto unlock;

	/* a zero-copy page cannot be merged with copied data */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	af_alg_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
EXPORT_SYMBOL_GPL(af_alg_sendpage);
  838. /**
  839. * af_alg_free_resources - release resources required for crypto request
  840. */
  841. void af_alg_free_resources(struct af_alg_async_req *areq)
  842. {
  843. struct sock *sk = areq->sk;
  844. af_alg_free_areq_sgls(areq);
  845. sock_kfree_s(sk, areq, areq->areqlen);
  846. }
  847. EXPORT_SYMBOL_GPL(af_alg_free_resources);
/**
 * af_alg_async_cb - AIO callback handler
 *
 * This handler cleans up the struct af_alg_async_req upon completion of the
 * AIO operation.
 *
 * The number of bytes to be generated with the AIO operation must be set
 * in areq->outlen before the AIO callback handler is invoked.
 */
void af_alg_async_cb(struct crypto_async_request *_req, int err)
{
	struct af_alg_async_req *areq = _req->data;
	struct sock *sk = areq->sk;
	struct kiocb *iocb = areq->iocb;
	unsigned int resultlen;

	/* Buffer size written by crypto operation. */
	resultlen = areq->outlen;

	/* free the request before completing; iocb/sk were saved above */
	af_alg_free_resources(areq);
	sock_put(sk);

	iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
  870. /**
  871. * af_alg_poll - poll system call handler
  872. */
  873. __poll_t af_alg_poll(struct file *file, struct socket *sock,
  874. poll_table *wait)
  875. {
  876. struct sock *sk = sock->sk;
  877. struct alg_sock *ask = alg_sk(sk);
  878. struct af_alg_ctx *ctx = ask->private;
  879. __poll_t mask;
  880. sock_poll_wait(file, sock, wait);
  881. mask = 0;
  882. if (!ctx->more || ctx->used)
  883. mask |= EPOLLIN | EPOLLRDNORM;
  884. if (af_alg_writable(sk))
  885. mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
  886. return mask;
  887. }
  888. EXPORT_SYMBOL_GPL(af_alg_poll);
  889. /**
  890. * af_alg_alloc_areq - allocate struct af_alg_async_req
  891. *
  892. * @sk socket of connection to user space
  893. * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
  894. * @return allocated data structure or ERR_PTR upon error
  895. */
  896. struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
  897. unsigned int areqlen)
  898. {
  899. struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
  900. if (unlikely(!areq))
  901. return ERR_PTR(-ENOMEM);
  902. areq->areqlen = areqlen;
  903. areq->sk = sk;
  904. areq->last_rsgl = NULL;
  905. INIT_LIST_HEAD(&areq->rsgl_list);
  906. areq->tsgl = NULL;
  907. areq->tsgl_entries = 0;
  908. return areq;
  909. }
  910. EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
/**
 * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
 *		     operation
 *
 * @sk socket of connection to user space
 * @msg user space message
 * @flags flags used to invoke recvmsg with
 * @areq instance of the cryptographic request that will hold the RX SGL
 * @maxsize maximum number of bytes to be pulled from user space
 * @outlen number of bytes in the RX SGL
 * @return 0 on success, < 0 upon error
 */
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
		    struct af_alg_async_req *areq, size_t maxsize,
		    size_t *outlen)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	size_t len = 0;

	while (maxsize > len && msg_data_left(msg)) {
		struct af_alg_rsgl *rsgl;
		size_t seglen;
		int err;

		/* limit the amount of readable buffers */
		if (!af_alg_readable(sk))
			break;

		seglen = min_t(size_t, (maxsize - len),
			       msg_data_left(msg));

		/* the first segment uses the SGL embedded in areq */
		if (list_empty(&areq->rsgl_list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl))
				return -ENOMEM;
		}

		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->rsgl_list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0) {
			rsgl->sg_num_bytes = 0;
			return err;
		}

		/* chain the new scatterlist with previous one */
		if (areq->last_rsgl)
			af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);

		areq->last_rsgl = rsgl;
		len += err;
		/* account the mapped RX bytes against the socket's limit */
		atomic_add(err, &ctx->rcvused);
		rsgl->sg_num_bytes = err;
		iov_iter_advance(&msg->msg_iter, err);
	}

	*outlen = len;
	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
  967. static int __init af_alg_init(void)
  968. {
  969. int err = proto_register(&alg_proto, 0);
  970. if (err)
  971. goto out;
  972. err = sock_register(&alg_family);
  973. if (err != 0)
  974. goto out_unregister_proto;
  975. out:
  976. return err;
  977. out_unregister_proto:
  978. proto_unregister(&alg_proto);
  979. goto out;
  980. }
/* Unregister the PF_ALG family and the ALG protocol on module unload. */
static void __exit af_alg_exit(void)
{
	sock_unregister(PF_ALG);
	proto_unregister(&alg_proto);
}
/* Module registration for the AF_ALG socket family. */
module_init(af_alg_init);
module_exit(af_alg_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_ALG);