/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE
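
/* Run the AEAD decryption for one TLS record: @sgin holds the AAD followed
 * by the ciphertext, @sgout receives the output (it may alias @sgin for
 * in-place decryption).  Blocks in crypto_wait_req() until the cipher
 * request completes.
 */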
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
	return ret;
}
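
/* Shrink a partially filled scatterlist back to @target_size bytes,
 * releasing the pages and socket memory charges of everything trimmed off.
 */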
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	if (rc == -ENOSPC)
		ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	if (rc == -ENOSPC)
		ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}
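
/* Encrypt the pending plaintext record.  The first encrypted fragment is
 * temporarily advanced past the TLS header so the AEAD output lands right
 * after the prepend space, then restored before returning.
 */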
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}
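
/* Build the AAD and record header for the currently open record, encrypt
 * it, and hand the ciphertext scatterlist to the transmit path.  On
 * encryption failure SOCK_NOSPACE is set so a later write_space callback
 * can retry the push.
 */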
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	aead_request_free(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}
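
/* Pin the user pages backing @from and map up to @length bytes of them
 * directly into the scatterlist @to, so data can be encrypted or decrypted
 * without an intermediate copy.  On failure the iterator is reverted to
 * its original position.
 */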
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];
	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);

	*size_used = size;
	*pages_used = num_elem;

	return rc;
}
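
/* Copy @bytes from the message iterator into the pre-allocated plaintext
 * scatterlist pages, starting at the first fragment of the open record.
 */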
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
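
/* sendmsg() for a TLS_SW transmit socket.  Data is accumulated into an
 * open record until the record is full (TLS_MAX_PAYLOAD_SIZE) or the
 * message ends, then the record is encrypted and pushed.  When this write
 * will close the record (full record or end of message) the user pages are
 * mapped zero-copy; otherwise the data is copied into the plaintext
 * scatterlist.
 */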
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	ret = tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo);
	if (ret)
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor)) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret)
				goto send_end;
			continue;

fallback_to_reg_send:
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
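
/* sendpage() for a TLS_SW transmit socket.  The referenced page is added
 * to the open record's plaintext scatterlist without copying; the record
 * is pushed once it is full, the sg array is exhausted, or no further
 * data is expected.
 */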
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	ret = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
	if (ret)
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
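
/* Wait until the strparser has handed us a complete record (ctx->recv_pkt),
 * or return NULL on socket error, shutdown, timeout, or a pending signal.
 */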
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			__strp_unpause(&ctx->strp);
			if (ctx->recv_pkt)
				return ctx->recv_pkt;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

/* This function decrypts the input skb into either out_iov, out_sg, or the
 * skb buffers themselves. The input parameter 'zc' indicates whether
 * zero-copy mode should be tried. With zero-copy mode, either out_iov or
 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens inside the skb buffers themselves, i.e. zero-copy
 * gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
	} else {
		n_sgout = 0;
		*zc = false;
	}

	n_sgin = skb_cow_data(skb, 0, &unused);
	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + TLS_AAD_SPACE_SIZE;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + TLS_AAD_SPACE_SIZE;

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
		     ctx->control);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

			*chunk = 0;
			err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
						 chunk, &sgout[1],
						 (n_sgout - 1), false);
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = 0;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}
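
/* Decrypt the record held in @skb (unless device offload already did) and
 * strip the TLS header and overhead, advancing the receive-side record
 * sequence number on success.
 */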
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
		if (err < 0)
			return err;
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}
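
/* recvmsg() for a TLS_SW receive socket.  The record type is reported once
 * per call via a TLS_GET_RECORD_TYPE control message and reading stops if
 * the record type changes.  Records are decrypted directly into the user
 * iterator when possible, otherwise copied out of the skb after in-place
 * decryption.
 */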
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

			if (!is_kvec && to_copy <= len &&
			    likely(!(flags & MSG_PEEK)))
				zc = true;

			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
						 &chunk, &zc);
			if (err < 0) {
				tls_err_abort(sk, EBADMSG);
				goto recv_end;
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		} else {
			/* MSG_PEEK right now cannot look beyond current skb
			 * from strparser, meaning we cannot advance skb here
			 * and thus unpause strparser since we'd lose the
			 * original one.
			 */
			break;
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}
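
/* strparser parse_msg callback: validate the TLS record header at the start
 * of @skb and return the full record length (header + payload), 0 if more
 * data is needed, or a negative error that aborts the socket.
 */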
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}
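
/* Set up software TLS for one direction of @sk: allocate the per-direction
 * context, derive IV and record-sequence state from the user-supplied
 * crypto_info (AES-GCM-128 only), allocate and key the AEAD transform, and,
 * for receive, attach the strparser and replace sk_data_ready.
 */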
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, gcm_128_info->key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}