
// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose an abort to be made in the I/O thread.
 */
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
			 enum rxrpc_abort_reason why)
{
	_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);

	if (!call->send_abort && !rxrpc_call_is_complete(call)) {
		call->send_abort_why = why;
		call->send_abort_err = error;
		call->send_abort_seq = 0;
		/* Request abort locklessly vs rxrpc_input_call_event(). */
		smp_store_release(&call->send_abort, abort_code);
		rxrpc_poke_call(call, rxrpc_call_poke_abort);
		return true;
	}

	return false;
}
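
/*
 * Illustrative sketch (not part of this file): the consuming side of the
 * lockless handoff above lives in the I/O thread.  Simplified and from
 * memory, it pairs with the smp_store_release() roughly like this:
 *
 *	abort_code = smp_load_acquire(&call->send_abort);
 *	if (abort_code)
 *		rxrpc_abort_call(call, call->send_abort_seq, abort_code,
 *				 call->send_abort_err, call->send_abort_why);
 *
 * The acquire/release pairing guarantees that send_abort_why, send_abort_err
 * and send_abort_seq are visible to the I/O thread before it can observe a
 * non-zero send_abort value.
 */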

/*
 * Wait for a call to become connected.  Interruption here doesn't cause the
 * call to be aborted.
 */
static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
		goto no_wait;

	add_wait_queue_exclusive(&call->waitq, &myself);

	for (;;) {
		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}

		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}
		*timeo = schedule_timeout(*timeo);
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

no_wait:
	if (ret == 0 && rxrpc_call_is_complete(call))
		ret = call->error;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Return true if there's sufficient Tx queue space.
 */
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
	if (_tx_win)
		*_tx_win = call->tx_bottom;
	return call->tx_prepared - call->tx_bottom < 256;
}

/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 */
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt, timeout;

	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
	rtt = usecs_to_jiffies(rtt) * 2;
	if (rtt < 2)
		rtt = 2;

	timeout = rtt;
	tx_start = smp_load_acquire(&call->acks_hard_ack);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (rxrpc_check_tx_space(call, &tx_win))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		if (timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		if (tx_win != tx_start) {
			timeout = rtt;
			tx_start = tx_win;
		}

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		timeout = schedule_timeout(timeout);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    long *timeo)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * wait for space to appear in the transmit/ACK window
 * - the caller holds the call user access mutex, but not the socket lock
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u,%u}",
	       call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	switch (call->interruptibility) {
	case RXRPC_INTERRUPTIBLE:
		if (waitall)
			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
		else
			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
		break;
	case RXRPC_PREINTERRUPTIBLE:
	case RXRPC_UNINTERRUPTIBLE:
	default:
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
		break;
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}

/*
 * Queue a DATA packet for transmission, set its timestamp for the resend
 * algorithm and poke the I/O thread to transmit it.
 */
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       struct rxrpc_txbuf *txb,
			       rxrpc_notify_end_tx_t notify_end_tx)
{
	rxrpc_seq_t seq = txb->seq;
	bool poke, last = txb->flags & RXRPC_LAST_PACKET;

	rxrpc_inc_stat(call->rxnet, stat_tx_data);

	ASSERTCMP(txb->seq, ==, call->tx_prepared + 1);

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	txb->last_sent = ktime_get_real();

	if (last)
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
	else
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);

	/* Add the packet to the call's output buffer */
	spin_lock(&call->tx_lock);
	poke = list_empty(&call->tx_sendmsg);
	list_add_tail(&txb->call_link, &call->tx_sendmsg);
	call->tx_prepared = seq;
	if (last)
		rxrpc_notify_end_tx(rx, call, notify_end_tx);
	spin_unlock(&call->tx_lock);

	if (poke)
		rxrpc_poke_call(call, rxrpc_call_poke_start);
}

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx,
			   bool *_dropped_lock)
{
	struct rxrpc_txbuf *txb;
	struct sock *sk = &rx->sk;
	enum rxrpc_call_state state;
	long timeo;
	bool more = msg->msg_flags & MSG_MORE;
	int ret, copied = 0;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ret = rxrpc_wait_to_be_connected(call, &timeo);
	if (ret < 0)
		return ret;

	if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
		ret = rxrpc_init_client_conn_security(call->conn);
		if (ret < 0)
			return ret;
	}

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

reload:
	txb = call->tx_pending;
	call->tx_pending = NULL;
	if (txb)
		rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);

	ret = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto maybe_error;
	state = rxrpc_call_state(call);
	ret = -ESHUTDOWN;
	if (state >= RXRPC_CALL_COMPLETE)
		goto maybe_error;
	ret = -EPROTO;
	if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
	    state != RXRPC_CALL_SERVER_ACK_REQUEST &&
	    state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Request phase complete for this client call */
		trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
				  call->cid, call->call_id, call->rx_consumed,
				  0, -EPROTO);
		goto maybe_error;
	}

	ret = -EMSGSIZE;
	if (call->tx_total_len != -1) {
		if (len - copied > call->tx_total_len)
			goto maybe_error;
		if (!more && len - copied != call->tx_total_len)
			goto maybe_error;
	}

	do {
		if (!txb) {
			size_t remain;

			_debug("alloc");

			if (!rxrpc_check_tx_space(call, NULL))
				goto wait_for_space;

			/* Work out the maximum size of a packet.  Assume that
			 * the security header is going to be in the padded
			 * region (enc blocksize), but the trailer is not.
			 */
			remain = more ? INT_MAX : msg_data_left(msg);
			txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
			if (!txb) {
				ret = -ENOMEM;
				goto maybe_error;
			}
		}

		_debug("append");

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			size_t copy = min_t(size_t, txb->space, msg_data_left(msg));

			_debug("add %zu", copy);
			if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
						 copy, &msg->msg_iter))
				goto efault;
			_debug("added");
			txb->space -= copy;
			txb->len += copy;
			txb->offset += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (rxrpc_call_is_complete(call))
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (!txb->space ||
		    (msg_data_left(msg) == 0 && !more)) {
			if (msg_data_left(msg) == 0 && !more)
				txb->flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->acks_hard_ack <
				 call->tx_winsize)
				txb->flags |= RXRPC_MORE_PACKETS;

			ret = call->security->secure_packet(call, txb);
			if (ret < 0)
				goto out;

			txb->kvec[0].iov_len += txb->len;
			txb->len = txb->kvec[0].iov_len;
			rxrpc_queue_packet(rx, call, txb, notify_end_tx);
			txb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
	if (rxrpc_call_is_complete(call) &&
	    call->error < 0)
		ret = call->error;
out:
	call->tx_pending = txb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;

wait_for_space:
	ret = -EAGAIN;
	if (msg->msg_flags & MSG_DONTWAIT)
		goto maybe_error;
	mutex_unlock(&call->user_mutex);
	*_dropped_lock = true;
	ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
				       msg->msg_flags & MSG_WAITALL);
	if (ret < 0)
		goto maybe_error;
	if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(timeo);
			goto maybe_error;
		}
	} else {
		mutex_lock(&call->user_mutex);
	}
	*_dropped_lock = false;
	goto reload;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_CHARGE_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_CHARGE_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
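
/*
 * Illustrative sketch (not part of this file): how a userspace client might
 * build the control buffer that rxrpc_sendmsg_cmsg() parses above.  This
 * assumes an AF_RXRPC client socket already set up with connect(); the fd,
 * call ID and request buffer names are hypothetical.
 *
 *	unsigned long call_id = 1;	// becomes p->call.user_call_ID
 *	char ctrl[CMSG_SPACE(sizeof(call_id))];
 *	struct iovec iov = { .iov_base = request, .iov_len = request_len };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *	if (sendmsg(fd, &msg, 0) < 0)	// no MSG_MORE: ends the request phase
 *		perror("sendmsg");
 */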

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	peer = rxrpc_lookup_peer(rx->local, srx, GFP_KERNEL);
	if (!peer) {
		release_sock(&rx->sk);
		return ERR_PTR(-ENOMEM);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.peer			= peer;
	cp.key			= rx->key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, &p->call, GFP_KERNEL,
				     atomic_inc_return(&rxrpc_debug_id));
	/* The socket is now unlocked */

	rxrpc_put_peer(peer, rxrpc_peer_put_application);
	_leave(" = %p\n", call);
	return call;
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call;
	bool dropped_lock = false;
	int ret;
	struct rxrpc_send_params p = {
		.call.tx_total_len	= -1,
		.call.user_call_ID	= 0,
		.call.nr_timeouts	= 0,
		.call.interruptibility	= RXRPC_INTERRUPTIBLE,
		.abort_code		= 0,
		.command		= RXRPC_CMD_SEND_DATA,
		.exclusive		= false,
		.upgrade		= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
		goto error_release_sock;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
		p.call.nr_timeouts = 0;
		ret = 0;
		if (rxrpc_call_is_complete(call))
			goto out_put_unlock;
	} else {
		switch (rxrpc_call_state(call)) {
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_RECV_REQUEST:
			if (p.command == RXRPC_CMD_SEND_ABORT)
				break;
			fallthrough;
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_SERVER_PREALLOC:
			rxrpc_put_call(call, rxrpc_call_put_sendmsg);
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.call.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto out_put_unlock;
			call->tx_total_len = p.call.tx_total_len;
		}
	}

	switch (p.call.nr_timeouts) {
	case 3:
		WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
		fallthrough;
	case 2:
		WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
		fallthrough;
	case 1:
		if (p.call.timeouts.hard > 0) {
			ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);

			/* The hard timeout is given in seconds, so add the
			 * converted delay, not the raw value, to the clock.
			 */
			WRITE_ONCE(call->expect_term_by,
				   ktime_add(ktime_get_real(), delay));
			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
			rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
		}
		break;
	}

	if (rxrpc_call_is_complete(call)) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
				    rxrpc_abort_call_sendmsg);
		ret = 0;
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
	}

out_put_unlock:
	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put_sendmsg);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	bool dropped_lock = false;
	int ret;

	_enter("{%d},", call->debug_id);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
			      notify_end_tx, &dropped_lock);
	if (ret == -ESHUTDOWN)
		ret = call->error;

	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);
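
/*
 * Illustrative sketch (not part of this file): an in-kernel user such as an
 * AFS-like filesystem might push a request buffer into a call like this.
 * The call is assumed to have come from rxrpc_kernel_begin_call(); sock,
 * call, buf and size are hypothetical names.
 *
 *	struct kvec iov = { .iov_base = buf, .iov_len = size };
 *	struct msghdr msg = { .msg_flags = MSG_MORE };	// more data follows
 *	int ret;
 *
 *	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
 *	ret = rxrpc_kernel_send_data(sock, call, &msg, size, NULL);
 *
 * A negative return may be the call's abort/network error; dropping
 * MSG_MORE on the final segment ends the transmission phase.
 */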

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: Indication as to why.
 *
 * Allow a kernel service to abort a call, if it's still in an abortable state
 * and return true if the call was aborted, false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, enum rxrpc_abort_reason why)
{
	bool aborted;

	_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);
	aborted = rxrpc_propose_abort(call, abort_code, error, why);
	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
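
/*
 * Illustrative sketch (not part of this file): a kernel service abandoning a
 * call it no longer wants the reply to might do, with a hypothetical tracing
 * reason for @why:
 *
 *	rxrpc_kernel_abort_call(sock, call, RX_USER_ABORT, -ECANCELED,
 *				my_service_abort_cancelled);
 *
 * where the abort code is an Rx protocol value sent to the peer and the
 * reason is a member of enum rxrpc_abort_reason used only for tracing.
 */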

/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call.  This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily for use for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
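
/*
 * Illustrative sketch (not part of this file): a server-side kernel service
 * that knows its reply will be exactly reply_size bytes (a hypothetical
 * name) might declare that before sending, so rxrpc_send_data() can enforce
 * the length and the security class can size its buffers precisely:
 *
 *	rxrpc_kernel_set_tx_length(sock, call, reply_size);
 *	ret = rxrpc_kernel_send_data(sock, call, &msg, reply_size, NULL);
 */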