/* net/rxrpc/output.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/udp.h>
#include "ar-internal.h"

extern int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
  17. static ssize_t do_udp_sendmsg(struct socket *socket, struct msghdr *msg, size_t len)
  18. {
  19. struct sockaddr *sa = msg->msg_name;
  20. struct sock *sk = socket->sk;
  21. if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6)) {
  22. if (sa->sa_family == AF_INET6) {
  23. if (sk->sk_family != AF_INET6) {
  24. pr_warn("AF_INET6 address on AF_INET socket\n");
  25. return -ENOPROTOOPT;
  26. }
  27. return udpv6_sendmsg(sk, msg, len);
  28. }
  29. }
  30. return udp_sendmsg(sk, msg, len);
  31. }
/* On-stack buffer for building an ABORT packet: wire header immediately
 * followed by the abort code, both in network byte order.
 */
struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;	/* Standard RxRPC wire header */
	__be32 abort_code;		/* Abort code appended after the header */
};

/* Empty payload sent after the wire header in VERSION keepalive packets. */
static const char rxrpc_keepalive_string[] = "";
  37. /*
  38. * Increase Tx backoff on transmission failure and clear it on success.
  39. */
  40. static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
  41. {
  42. if (ret < 0) {
  43. if (call->tx_backoff < 1000)
  44. call->tx_backoff += 100;
  45. } else {
  46. call->tx_backoff = 0;
  47. }
  48. }
/*
 * Arrange for a keepalive ping a certain time after we last transmitted. This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call, ktime_t now)
{
	/* Ping at 1/6 of the Rx-expiry interval so several keepalives fit
	 * inside one expect_rx_by period.
	 */
	ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6);

	/* NOTE(review): the 'now' parameter is unused; the deadline is based
	 * on a fresh ktime_get_real() read instead — confirm intentional.
	 */
	call->keepalive_at = ktime_add(ktime_get_real(), delay);
	trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive);
}
/*
 * Fill out an ACK packet.
 *
 * The txbuf is laid out in three kvecs: kvec[0] holds the wire header
 * immediately followed by the ackpacket body, kvec[1] holds the soft-ACK
 * table, and kvec[2] holds three filler bytes followed by the ACK trailer.
 */
static void rxrpc_fill_out_ack(struct rxrpc_call *call,
			       struct rxrpc_txbuf *txb,
			       u8 ack_reason,
			       rxrpc_serial_t serial)
{
	struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
	/* The trailer sits after the three filler bytes at the start of kvec[2]. */
	struct rxrpc_acktrailer *trailer = txb->kvec[2].iov_base + 3;
	/* The ackpacket body directly follows the wire header in kvec[0]. */
	struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
	unsigned int qsize, sack, wrap, to;
	rxrpc_seq_t window, wtop;
	int rsize;
	u32 mtu, jmax;
	u8 *filler = txb->kvec[2].iov_base;
	u8 *sackp = txb->kvec[1].iov_base;

	rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);

	/* Snapshot the ACK window: [window, wtop) is the soft-ACK'able span. */
	window = call->ackr_window;
	wtop   = call->ackr_wtop;
	sack   = call->ackr_sack_base % RXRPC_SACK_SIZE;

	whdr->seq		= 0;
	whdr->type		= RXRPC_PACKET_TYPE_ACK;
	txb->flags		|= RXRPC_SLOW_START_OK;
	ack->bufferSpace	= 0;
	ack->maxSkew		= 0;
	ack->firstPacket	= htonl(window);
	ack->previousPacket	= htonl(call->rx_highest_seq);
	ack->serial		= htonl(serial);
	ack->reason		= ack_reason;
	ack->nAcks		= wtop - window;
	filler[0]		= 0;
	filler[1]		= 0;
	filler[2]		= 0;

	/* PING acks always solicit a response. */
	if (ack_reason == RXRPC_ACK_PING)
		txb->flags |= RXRPC_REQUEST_ACK;

	if (after(wtop, window)) {
		/* Copy the soft-ACK table into kvec[1], unwrapping the
		 * circular buffer if the live region crosses the end.
		 */
		txb->len += ack->nAcks;
		txb->kvec[1].iov_base = sackp;
		txb->kvec[1].iov_len = ack->nAcks;

		wrap = RXRPC_SACK_SIZE - sack;
		to = min_t(unsigned int, ack->nAcks, RXRPC_SACK_SIZE);

		if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
			memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
		} else {
			memcpy(sackp, call->ackr_sack_table + sack, wrap);
			memcpy(sackp + wrap, call->ackr_sack_table, to - wrap);
		}
	} else if (before(wtop, window)) {
		/* Shouldn't happen: the window top regressed below its base. */
		pr_warn("ack window backward %x %x", window, wtop);
	} else if (ack->reason == RXRPC_ACK_DELAY) {
		/* Empty window (wtop == window): downgrade DELAY to IDLE. */
		ack->reason = RXRPC_ACK_IDLE;
	}

	/* Work out the MTUs and receive window to advertise in the trailer. */
	mtu = call->peer->if_mtu;
	mtu -= call->peer->hdrsize;
	jmax = rxrpc_rx_jumbo_max;
	/* qsize = packets received but not yet consumed by the app. */
	qsize = (window - 1) - call->rx_consumed;
	rsize = max_t(int, call->rx_winsize - qsize, 0);
	txb->ack_rwind = rsize;
	trailer->maxMTU		= htonl(rxrpc_rx_mtu);
	trailer->ifMTU		= htonl(mtu);
	trailer->rwind		= htonl(rsize);
	trailer->jumbo_max	= htonl(jmax);
}
/*
 * Record the beginning of an RTT probe.
 *
 * Claim a free RTT sample slot, if one is available, stamp it with the
 * probe's serial number and transmit time, then mark it pending so the
 * matching ACK can complete the measurement.  If no slot is free, just
 * emit a no-slot trace event.
 */
static void rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
				  ktime_t now, enum rxrpc_rtt_tx_trace why)
{
	unsigned long avail = call->rtt_avail;
	int rtt_slot = 9;	/* Out-of-range sentinel for the no-slot trace */

	if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
		goto no_slot;

	/* Pick the lowest available slot and try to claim it atomically. */
	rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
	if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
		goto no_slot;	/* Lost a race for the slot */

	call->rtt_serial[rtt_slot] = serial;
	call->rtt_sent_at[rtt_slot] = now;
	smp_wmb(); /* Write data before avail bit */
	set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);

	trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
	return;

no_slot:
	trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
}
/*
 * Transmit an ACK packet.
 *
 * Assigns the next serial number on the connection, sends the prefilled
 * txbuf and, on success, starts an RTT probe for PING acks and rearms the
 * keepalive timer.  Failures only leave a trace and bump the Tx backoff.
 */
static void rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
	struct rxrpc_connection *conn;
	struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
	struct msghdr msg;
	ktime_t now;
	int ret;

	/* Nothing to send to once the call is disconnected. */
	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= MSG_SPLICE_PAGES;

	whdr->flags	= txb->flags & RXRPC_TXBUF_WIRE_FLAGS;

	/* Each transmission gets a fresh serial number. */
	txb->serial = rxrpc_get_next_serial(conn);
	whdr->serial = htonl(txb->serial);
	trace_rxrpc_tx_ack(call->debug_id, txb->serial,
			   ntohl(ack->firstPacket),
			   ntohl(ack->serial), ack->reason, ack->nAcks,
			   txb->ack_rwind);

	rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);

	iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, txb->len);
	/* ACKs may be fragmented if need be. */
	rxrpc_local_dont_fragment(conn->local, false);
	ret = do_udp_sendmsg(conn->local->socket, &msg, txb->len);
	call->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0) {
		trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret,
				    rxrpc_tx_point_call_ack);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, whdr,
				      rxrpc_tx_point_call_ack);
		now = ktime_get_real();
		/* A PING ack doubles as an RTT probe. */
		if (ack->reason == RXRPC_ACK_PING)
			rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_ping);
		if (txb->flags & RXRPC_REQUEST_ACK)
			call->peer->rtt_last_req = now;
		rxrpc_set_keepalive(call, now);
	}
	rxrpc_tx_backoff(call, ret);
}
/*
 * Queue an ACK for immediate transmission.
 *
 * Allocates a txbuf sized for the current soft-ACK span, fills it out and
 * sends it straight away, resetting the unacked/consumed counters.  Silently
 * does nothing if the call is disconnected or allocation fails.
 */
void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
		    rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
{
	struct rxrpc_txbuf *txb;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return;

	rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);

	/* Size the buffer for wtop - window soft-ACK bytes. */
	txb = rxrpc_alloc_ack_txbuf(call, call->ackr_wtop - call->ackr_window);
	if (!txb) {
		kleave(" = -ENOMEM");
		return;
	}

	txb->ack_why = why;

	rxrpc_fill_out_ack(call, txb, ack_reason, serial);
	/* Everything up to the window base is now acknowledged. */
	call->ackr_nr_unacked = 0;
	atomic_set(&call->ackr_nr_consumed, 0);
	clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);

	trace_rxrpc_send_ack(call, why, ack_reason, serial);
	rxrpc_send_ack_packet(call, txb);
	rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
}
/*
 * Send an ABORT call packet.
 *
 * Builds the packet on the stack (wire header + abort code) and sends it
 * with the connection's next serial number.  Returns 0 if the abort was
 * suppressed, -ECONNRESET if already disconnected, otherwise the sendmsg
 * result.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_abort_buffer pkt;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	int ret;

	/* Don't bother sending aborts for a client call once the server has
	 * hard-ACK'd all of its request data.  After that point, we're not
	 * going to stop the operation proceeding, and whilst we might limit
	 * the reply, it's not worth it if we can send a new call on the same
	 * channel instead, thereby closing off this call.
	 */
	if (rxrpc_is_client_call(call) &&
	    test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
		return 0;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	pkt.whdr.epoch		= htonl(conn->proto.epoch);
	pkt.whdr.cid		= htonl(call->cid);
	pkt.whdr.callNumber	= htonl(call->call_id);
	pkt.whdr.seq		= 0;
	pkt.whdr.type		= RXRPC_PACKET_TYPE_ABORT;
	pkt.whdr.flags		= conn->out_clientflag;
	pkt.whdr.userStatus	= 0;
	pkt.whdr.securityIndex	= call->security_ix;
	pkt.whdr._rsvd		= 0;
	pkt.whdr.serviceId	= htons(call->dest_srx.srx_service);
	pkt.abort_code		= htonl(call->abort_code);

	iov[0].iov_base	= &pkt;
	iov[0].iov_len	= sizeof(pkt);

	/* Serial is assigned as late as possible, just before transmission. */
	serial = rxrpc_get_next_serial(conn);
	pkt.whdr.serial = htonl(serial);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
	ret = do_udp_sendmsg(conn->local->socket, &msg, sizeof(pkt));
	conn->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_abort);
	rxrpc_tx_backoff(call, ret);
	return ret;
}
/*
 * Prepare a (sub)packet for transmission.
 *
 * Stamps the wire header with the given serial number and decides whether
 * to set RXRPC_REQUEST_ACK on this DATA packet, recording the reason in the
 * stats and trace stream.
 */
static void rxrpc_prepare_data_subpacket(struct rxrpc_call *call, struct rxrpc_txbuf *txb,
					 rxrpc_serial_t serial)
{
	struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
	enum rxrpc_req_ack_trace why;
	struct rxrpc_connection *conn = call->conn;

	_enter("%x,{%d}", txb->seq, txb->len);

	txb->serial = serial;

	/* The first packet of a probing connection advertises the upgrade. */
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
	    txb->seq == 1)
		whdr->userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;

	/* If our RTT cache needs working on, request an ACK.  Also request
	 * ACKs if a DATA packet appears to have been lost.
	 *
	 * However, we mustn't request an ACK on the last reply packet of a
	 * service call, lest OpenAFS incorrectly send us an ACK with some
	 * soft-ACKs in it and then never follow up with a proper hard ACK.
	 */
	if (txb->flags & RXRPC_REQUEST_ACK)
		why = rxrpc_reqack_already_on;
	else if ((txb->flags & RXRPC_LAST_PACKET) && rxrpc_sending_to_client(txb))
		why = rxrpc_reqack_no_srv_last;
	else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
		why = rxrpc_reqack_ack_lost;
	else if (txb->flags & RXRPC_TXBUF_RESENT)
		why = rxrpc_reqack_retrans;
	else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
		why = rxrpc_reqack_slow_start;
	else if (call->tx_winsize <= 2)
		why = rxrpc_reqack_small_txwin;
	else if (call->peer->rtt_count < 3 && txb->seq & 1)
		why = rxrpc_reqack_more_rtt;
	else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real()))
		why = rxrpc_reqack_old_rtt;
	else
		goto dont_set_request_ack;

	rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
	trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
	/* no_srv_last is traced but deliberately does not set the flag. */
	if (why != rxrpc_reqack_no_srv_last)
		txb->flags |= RXRPC_REQUEST_ACK;
dont_set_request_ack:

	whdr->flags	= txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
	whdr->serial	= htonl(txb->serial);
	whdr->cksum	= txb->cksum;

	trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags, false);
}
  323. /*
  324. * Prepare a packet for transmission.
  325. */
  326. static size_t rxrpc_prepare_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
  327. {
  328. rxrpc_serial_t serial;
  329. /* Each transmission of a Tx packet needs a new serial number */
  330. serial = rxrpc_get_next_serial(call->conn);
  331. rxrpc_prepare_data_subpacket(call, txb, serial);
  332. return txb->len;
  333. }
/*
 * Set timeouts after transmitting a packet.
 *
 * Records the transmit time on both the call and the txbuf, arms the
 * lost-ACK timer when an ACK was requested, starts the receive-expectation
 * timer on the first transmission and rearms the keepalive timer.
 */
static void rxrpc_tstamp_data_packets(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	ktime_t now = ktime_get_real();
	bool ack_requested = txb->flags & RXRPC_REQUEST_ACK;

	call->tx_last_sent = now;
	txb->last_sent = now;

	if (ack_requested) {
		/* An ACK-requesting DATA packet doubles as an RTT probe. */
		rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_data);

		call->peer->rtt_last_req = now;
		if (call->peer->rtt_count > 1) {
			/* Arm the lost-ACK timer off the RTO estimate. */
			ktime_t delay = rxrpc_get_rto_backoff(call->peer, false);

			call->ack_lost_at = ktime_add(now, delay);
			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_lost_ack);
		}
	}

	/* Only the first transmission starts the expect-Rx timer. */
	if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags)) {
		ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo));

		call->expect_rx_by = ktime_add(now, delay);
		trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
	}

	rxrpc_set_keepalive(call, now);
}
  359. /*
  360. * send a packet through the transport endpoint
  361. */
  362. static int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
  363. {
  364. struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
  365. struct rxrpc_connection *conn = call->conn;
  366. enum rxrpc_tx_point frag;
  367. struct msghdr msg;
  368. size_t len;
  369. int ret;
  370. _enter("%x,{%d}", txb->seq, txb->len);
  371. len = rxrpc_prepare_data_packet(call, txb);
  372. if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
  373. static int lose;
  374. if ((lose++ & 7) == 7) {
  375. ret = 0;
  376. trace_rxrpc_tx_data(call, txb->seq, txb->serial,
  377. txb->flags, true);
  378. goto done;
  379. }
  380. }
  381. iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, len);
  382. msg.msg_name = &call->peer->srx.transport;
  383. msg.msg_namelen = call->peer->srx.transport_len;
  384. msg.msg_control = NULL;
  385. msg.msg_controllen = 0;
  386. msg.msg_flags = MSG_SPLICE_PAGES;
  387. /* Track what we've attempted to transmit at least once so that the
  388. * retransmission algorithm doesn't try to resend what we haven't sent
  389. * yet.
  390. */
  391. if (txb->seq == call->tx_transmitted + 1)
  392. call->tx_transmitted = txb->seq;
  393. /* send the packet with the don't fragment bit set if we currently
  394. * think it's small enough */
  395. if (txb->len >= call->peer->maxdata) {
  396. rxrpc_local_dont_fragment(conn->local, false);
  397. frag = rxrpc_tx_point_call_data_frag;
  398. } else {
  399. rxrpc_local_dont_fragment(conn->local, true);
  400. frag = rxrpc_tx_point_call_data_nofrag;
  401. }
  402. retry:
  403. /* send the packet by UDP
  404. * - returns -EMSGSIZE if UDP would have to fragment the packet
  405. * to go out of the interface
  406. * - in which case, we'll have processed the ICMP error
  407. * message and update the peer record
  408. */
  409. rxrpc_inc_stat(call->rxnet, stat_tx_data_send);
  410. ret = do_udp_sendmsg(conn->local->socket, &msg, len);
  411. conn->peer->last_tx_at = ktime_get_seconds();
  412. if (ret < 0) {
  413. rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
  414. trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret, frag);
  415. } else {
  416. trace_rxrpc_tx_packet(call->debug_id, whdr, frag);
  417. }
  418. rxrpc_tx_backoff(call, ret);
  419. if (ret == -EMSGSIZE && frag == rxrpc_tx_point_call_data_frag) {
  420. rxrpc_local_dont_fragment(conn->local, false);
  421. frag = rxrpc_tx_point_call_data_frag;
  422. goto retry;
  423. }
  424. done:
  425. if (ret >= 0) {
  426. rxrpc_tstamp_data_packets(call, txb);
  427. } else {
  428. /* Cancel the call if the initial transmission fails,
  429. * particularly if that's due to network routing issues that
  430. * aren't going away anytime soon. The layer above can arrange
  431. * the retransmission.
  432. */
  433. if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
  434. rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
  435. RX_USER_ABORT, ret);
  436. }
  437. _leave(" = %d [%u]", ret, call->peer->maxdata);
  438. return ret;
  439. }
/*
 * Transmit a connection-level abort.
 *
 * Sends a wire header plus the connection's abort code (as a second kvec)
 * to the peer.  Failures are only traced; there is no retry.
 */
void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	__be32 word;
	size_t len;
	u32 serial;
	int ret;

	msg.msg_name	= &conn->peer->srx.transport;
	msg.msg_namelen	= conn->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	whdr.epoch	= htonl(conn->proto.epoch);
	whdr.cid	= htonl(conn->proto.cid);
	whdr.callNumber	= 0;	/* Connection-level: no call attached */
	whdr.seq	= 0;
	whdr.type	= RXRPC_PACKET_TYPE_ABORT;
	whdr.flags	= conn->out_clientflag;
	whdr.userStatus	= 0;
	whdr.securityIndex = conn->security_ix;
	whdr._rsvd	= 0;
	whdr.serviceId	= htons(conn->service_id);

	word		= htonl(conn->abort_code);

	iov[0].iov_base	= &whdr;
	iov[0].iov_len	= sizeof(whdr);
	iov[1].iov_base	= &word;
	iov[1].iov_len	= sizeof(word);

	len = iov[0].iov_len + iov[1].iov_len;

	/* Serial is assigned just before transmission. */
	serial = rxrpc_get_next_serial(conn);
	whdr.serial = htonl(serial);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_conn_abort);
		_debug("sendmsg failed: %d", ret);
		return;
	}

	trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);

	/* Only updated on success; the failure path returned above. */
	conn->peer->last_tx_at = ktime_get_seconds();
}
/*
 * Reject a packet through the local endpoint.
 *
 * Depending on skb->mark, replies with a BUSY packet (header only) or an
 * ABORT packet (header + code taken from skb->priority), addressed back to
 * the sender extracted from the skb.  ABORT packets are never answered with
 * an abort, to avoid ping-pong.
 */
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;
	int ret, ioc;

	if (sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
		return; /* Never abort an abort. */

	rxrpc_see_skb(skb, rxrpc_skb_see_reject);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);

	msg.msg_name = &srx.transport;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&whdr, 0, sizeof(whdr));

	switch (skb->mark) {
	case RXRPC_SKB_MARK_REJECT_BUSY:
		whdr.type = RXRPC_PACKET_TYPE_BUSY;
		size = sizeof(whdr);
		ioc = 1;	/* Header only, no code word */
		break;
	case RXRPC_SKB_MARK_REJECT_ABORT:
		whdr.type = RXRPC_PACKET_TYPE_ABORT;
		code = htonl(skb->priority);	/* Abort code carried in priority */
		size = sizeof(whdr) + sizeof(code);
		ioc = 2;
		break;
	default:
		return;
	}

	if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
		msg.msg_namelen = srx.transport_len;

		/* Echo the offending packet's identifiers back at the sender. */
		whdr.epoch	= htonl(sp->hdr.epoch);
		whdr.cid	= htonl(sp->hdr.cid);
		whdr.callNumber	= htonl(sp->hdr.callNumber);
		whdr.serviceId	= htons(sp->hdr.serviceId);
		whdr.flags	= sp->hdr.flags;
		/* Flip the direction bit and discard all other flags. */
		whdr.flags	^= RXRPC_CLIENT_INITIATED;
		whdr.flags	&= RXRPC_CLIENT_INITIATED;

		iov_iter_kvec(&msg.msg_iter, WRITE, iov, ioc, size);
		ret = do_udp_sendmsg(local->socket, &msg, size);
		if (ret < 0)
			trace_rxrpc_tx_fail(local->debug_id, 0, ret,
					    rxrpc_tx_point_reject);
		else
			trace_rxrpc_tx_packet(local->debug_id, &whdr,
					      rxrpc_tx_point_reject);
	}
}
/*
 * Send a VERSION reply to a peer as a keepalive.
 *
 * The packet is a wire header plus the (empty, NUL-terminated) keepalive
 * string.  Failures are only traced; last_tx_at is updated regardless.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name	= &peer->srx.transport;
	msg.msg_namelen	= peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	whdr.epoch	= htonl(peer->local->rxnet->epoch);
	whdr.cid	= 0;
	whdr.callNumber	= 0;
	whdr.seq	= 0;
	whdr.serial	= 0;
	whdr.type	= RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
	whdr.flags	= RXRPC_LAST_PACKET;
	whdr.userStatus	= 0;
	whdr.securityIndex = 0;
	whdr._rsvd	= 0;
	whdr.serviceId	= 0;

	iov[0].iov_base	= &whdr;
	iov[0].iov_len	= sizeof(whdr);
	iov[1].iov_base	= (char *)rxrpc_keepalive_string;
	iov[1].iov_len	= sizeof(rxrpc_keepalive_string);

	len = iov[0].iov_len + iov[1].iov_len;

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
	ret = do_udp_sendmsg(peer->local->socket, &msg, len);
	if (ret < 0)
		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
				    rxrpc_tx_point_version_keepalive);
	else
		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
				      rxrpc_tx_point_version_keepalive);

	peer->last_tx_at = ktime_get_seconds();
	_leave("");
}
  588. /*
  589. * Schedule an instant Tx resend.
  590. */
  591. static inline void rxrpc_instant_resend(struct rxrpc_call *call,
  592. struct rxrpc_txbuf *txb)
  593. {
  594. if (!__rxrpc_call_is_complete(call))
  595. kdebug("resend");
  596. }
/*
 * Transmit one packet.
 *
 * Hard routing errors complete the call locally; any other failure asks for
 * an instant resend.  On success the resend timer is armed from the peer's
 * RTO estimate.
 */
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	int ret;

	ret = rxrpc_send_data_packet(call, txb);
	if (ret < 0) {
		switch (ret) {
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			/* Unrecoverable routing failure: kill the call. */
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  0, ret);
			break;
		default:
			_debug("need instant resend %d", ret);
			rxrpc_instant_resend(call, txb);
		}
	} else {
		/* Arm the resend timer one RTO out from now. */
		ktime_t delay = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);

		call->resend_at = ktime_add(ktime_get_real(), delay);
		trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_resend_tx);
	}
}