// SPDX-License-Identifier: GPL-2.0-or-later
/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose a PING ACK be sent.
 */
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
                        enum rxrpc_propose_ack_trace why)
{
        ktime_t delay = ms_to_ktime(READ_ONCE(rxrpc_idle_ack_delay));
        ktime_t now = ktime_get_real();
        ktime_t ping_at = ktime_add(now, delay);

        trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
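        /* Only pull the ping timer forward; if a ping is already due sooner,
         * leave the pending expiry in place.
         */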
        if (ktime_before(ping_at, call->ping_at)) {
                call->ping_at = ping_at;
                trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_ping);
        }
}

/*
 * Propose a DELAY ACK be sent in the future.
 */
void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
                             enum rxrpc_propose_ack_trace why)
{
        ktime_t now = ktime_get_real(), delay;

        trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
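
        /* Delay by roughly one smoothed RTT where we have an estimate
         * (srtt_us is assumed to be kept scaled by 8, as in TCP, hence the
         * shift), otherwise fall back to the configured soft-ACK delay.
         */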
        if (call->peer->srtt_us)
                delay = (call->peer->srtt_us >> 3) * NSEC_PER_USEC;
        else
                delay = ms_to_ktime(READ_ONCE(rxrpc_soft_ack_delay));
        delay = ktime_add_ms(delay, call->tx_backoff);

        call->delay_ack_at = ktime_add(now, delay);
        trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_delayed_ack);
}

/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
        set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}

/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
{
        struct rxrpc_ackpacket *ack = NULL;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_txbuf *txb;
        rxrpc_seq_t transmitted = call->tx_transmitted;
        ktime_t next_resend = KTIME_MAX, rto = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
        ktime_t resend_at = KTIME_MAX, now, delay;
        bool unacked = false, did_send = false;
        unsigned int i;

        _enter("{%d,%d}", call->acks_hard_ack, call->tx_top);

        now = ktime_get_real();

        if (list_empty(&call->tx_buffer))
                goto no_resend;

        trace_rxrpc_resend(call, ack_skb);

        txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);

        /* Scan the soft ACK table without dropping the lock and resend any
         * explicitly NAK'd packets.
         */
        if (ack_skb) {
                sp = rxrpc_skb(ack_skb);
                ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);

                for (i = 0; i < sp->ack.nr_acks; i++) {
                        rxrpc_seq_t seq;

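                        /* Each soft-ACK byte is RXRPC_ACK_TYPE_ACK (1) or
                         * RXRPC_ACK_TYPE_NACK (0); skip positively-acked
                         * packets and resend only the NAK'd ones.
                         */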
                        if (ack->acks[i] & 1)
                                continue;
                        seq = sp->ack.first_ack + i;
                        if (after(txb->seq, transmitted))
                                break;
                        if (after(txb->seq, seq))
                                continue; /* A new hard ACK probably came in */
                        list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
                                if (txb->seq == seq)
                                        goto found_txb;
                        }
                        goto no_further_resend;

                found_txb:
                        resend_at = ktime_add(txb->last_sent, rto);
                        if (after(txb->serial, call->acks_highest_serial)) {
                                if (ktime_after(resend_at, now) &&
                                    ktime_before(resend_at, next_resend))
                                        next_resend = resend_at;
                                continue; /* Ack point not yet reached */
                        }

                        rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);

                        trace_rxrpc_retransmit(call, txb->seq, txb->serial,
                                               ktime_sub(resend_at, now));

                        txb->flags |= RXRPC_TXBUF_RESENT;
                        rxrpc_transmit_one(call, txb);
                        did_send = true;
                        now = ktime_get_real();

                        if (list_is_last(&txb->call_link, &call->tx_buffer))
                                goto no_further_resend;
                        txb = list_next_entry(txb, call_link);
                }
        }

        /* Fast-forward through the Tx queue to the point the peer says it has
         * seen. Anything between the soft-ACK table and that point will get
         * ACK'd or NACK'd in due course, so don't worry about it here; here we
         * need to consider retransmitting anything beyond that point.
         */
        if (after_eq(call->acks_prev_seq, call->tx_transmitted))
                goto no_further_resend;

        list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
                resend_at = ktime_add(txb->last_sent, rto);

                if (before_eq(txb->seq, call->acks_prev_seq))
                        continue;
                if (after(txb->seq, call->tx_transmitted))
                        break; /* Not transmitted yet */

                if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
                    before(txb->serial, ntohl(ack->serial)))
                        goto do_resend; /* Wasn't accounted for by a more recent ping. */

                if (ktime_after(resend_at, now)) {
                        if (ktime_before(resend_at, next_resend))
                                next_resend = resend_at;
                        continue;
                }

        do_resend:
                unacked = true;
                txb->flags |= RXRPC_TXBUF_RESENT;
                rxrpc_transmit_one(call, txb);
                did_send = true;
                rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
                now = ktime_get_real();
        }

no_further_resend:
no_resend:
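        /* Reschedule the resend timer, applying the RTO backoff on top of the
         * most recently calculated resend point.
         */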
        if (resend_at < KTIME_MAX) {
                delay = rxrpc_get_rto_backoff(call->peer, did_send);
                resend_at = ktime_add(resend_at, delay);
                trace_rxrpc_timer_set(call, resend_at - now, rxrpc_timer_trace_resend_reset);
        }

        call->resend_at = resend_at;

        if (unacked)
                rxrpc_congestion_timeout(call);

        /* If there was nothing that needed retransmission then it's likely
         * that an ACK got lost somewhere. Send a ping to find out instead of
         * retransmitting data.
         */
        if (!did_send) {
                ktime_t next_ping = ktime_add_us(call->acks_latest_ts,
                                                 call->peer->srtt_us >> 3);

                if (ktime_sub(next_ping, now) <= 0)
                        rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                                       rxrpc_propose_ack_ping_for_0_retrans);
        }

        _leave("");
}

/*
 * Start transmitting the reply to a service. This cancels the need to ACK the
 * request if we haven't yet done so.
 */
static void rxrpc_begin_service_reply(struct rxrpc_call *call)
{
        rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
        if (call->ackr_reason == RXRPC_ACK_DELAY)
                call->ackr_reason = 0;
        call->delay_ack_at = KTIME_MAX;
        trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}

/*
 * Close the transmission phase. After this point there is no more data to be
 * transmitted in the call.
 */
static void rxrpc_close_tx_phase(struct rxrpc_call *call)
{
        _debug("________awaiting reply/ACK__________");

        switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
                rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
                break;
        case RXRPC_CALL_SERVER_SEND_REPLY:
                rxrpc_set_call_state(call, RXRPC_CALL_SERVER_AWAIT_ACK);
                break;
        default:
                break;
        }
}
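
/*
 * See if there is space in the Tx window to transmit another packet.  The
 * usable window is the lesser of the advertised Tx window and the congestion
 * window (plus any extra slack), counted from the last hard-ACK'd sequence.
 */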
static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
{
        unsigned int winsize = min_t(unsigned int, call->tx_winsize,
                                     call->cong_cwnd + call->cong_extra);
        rxrpc_seq_t window = call->acks_hard_ack, wtop = window + winsize;
        rxrpc_seq_t tx_top = call->tx_top;
        int space;

        space = wtop - tx_top;
        return space > 0;
}

/*
 * Decant some of the sendmsg-prepared queue into the transmission buffer.
 */
static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
{
        struct rxrpc_txbuf *txb;

        if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
                if (list_empty(&call->tx_sendmsg))
                        return;
                rxrpc_expose_client_call(call);
        }

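        /* Splice buffers one at a time from the sendmsg queue onto the live
         * Tx buffer; tx_lock guards only the removal, since the sendmsg path
         * may be appending to the queue concurrently.
         */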
        while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
                                               struct rxrpc_txbuf, call_link))) {
                spin_lock(&call->tx_lock);
                list_del(&txb->call_link);
                spin_unlock(&call->tx_lock);

                call->tx_top = txb->seq;
                list_add_tail(&txb->call_link, &call->tx_buffer);

                if (txb->flags & RXRPC_LAST_PACKET)
                        rxrpc_close_tx_phase(call);

                rxrpc_transmit_one(call, txb);

                if (!rxrpc_tx_window_has_space(call))
                        break;
        }
}
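
/*
 * Transmit as much queued sendmsg data as the Tx window allows, switching a
 * service call into the reply phase when the first reply packet is queued.
 */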
static void rxrpc_transmit_some_data(struct rxrpc_call *call)
{
        switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                if (list_empty(&call->tx_sendmsg))
                        return;
                rxrpc_begin_service_reply(call);
                fallthrough;

        case RXRPC_CALL_SERVER_SEND_REPLY:
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
                if (!rxrpc_tx_window_has_space(call))
                        return;
                if (list_empty(&call->tx_sendmsg)) {
                        rxrpc_inc_stat(call->rxnet, stat_tx_data_underflow);
                        return;
                }
                rxrpc_decant_prepared_tx(call);
                break;
        default:
                return;
        }
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_initial_ping(struct rxrpc_call *call)
{
        if (call->peer->rtt_count < 3 ||
            ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
                         ktime_get_real()))
                rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                               rxrpc_propose_ack_ping_for_params);
}

/*
 * Handle retransmission and deferred ACK/abort generation.
 */
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
{
        ktime_t now, t;
        bool resend = false;
        s32 abort_code;

        rxrpc_see_call(call, rxrpc_call_see_input);

        //printk("\n--------------------\n");
        _enter("{%d,%s,%lx}",
               call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)],
               call->events);

        if (__rxrpc_call_is_complete(call))
                goto out;

        /* Handle abort request locklessly, vs rxrpc_propose_abort(). */
        abort_code = smp_load_acquire(&call->send_abort);
        if (abort_code) {
                rxrpc_abort_call(call, 0, call->send_abort, call->send_abort_err,
                                 call->send_abort_why);
                goto out;
        }

        if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
                goto out;

        if (skb)
                rxrpc_input_call_packet(call, skb);

        /* If we see our async-event poke, check for timeout trippage. */
        now = ktime_get_real();
        t = ktime_sub(call->expect_rx_by, now);
        if (t <= 0) {
                trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_expect_rx);
                goto expired;
        }

        t = ktime_sub(call->expect_req_by, now);
        if (t <= 0) {
                call->expect_req_by = KTIME_MAX;
                if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST) {
                        trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_idle);
                        goto expired;
                }
        }

        t = ktime_sub(READ_ONCE(call->expect_term_by), now);
        if (t <= 0) {
                trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_hard);
                goto expired;
        }

        t = ktime_sub(call->delay_ack_at, now);
        if (t <= 0) {
                trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_delayed_ack);
                call->delay_ack_at = KTIME_MAX;
                rxrpc_send_ACK(call, RXRPC_ACK_DELAY, 0,
                               rxrpc_propose_ack_delayed_ack);
        }

        t = ktime_sub(call->ack_lost_at, now);
        if (t <= 0) {
                trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_lost_ack);
                call->ack_lost_at = KTIME_MAX;
                set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
        }

        t = ktime_sub(call->ping_at, now);
        if (t <= 0) {
                trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_ping);
                call->ping_at = KTIME_MAX;
                rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                               rxrpc_propose_ack_ping_for_keepalive);
        }

        t = ktime_sub(call->resend_at, now);
        if (t <= 0) {
                trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_resend);
                call->resend_at = KTIME_MAX;
                resend = true;
        }

        rxrpc_transmit_some_data(call);

        now = ktime_get_real();
        t = ktime_sub(call->keepalive_at, now);
        if (t <= 0) {
                trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_keepalive);
                call->keepalive_at = KTIME_MAX;
                rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                               rxrpc_propose_ack_ping_for_keepalive);
        }
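
        /* On seeing an ACK, give congestion management a chance to wind the
         * congestion window back down if transmission has been idle for a
         * while (see rxrpc_congestion_degrade()).
         */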
        if (skb) {
                struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

                if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK)
                        rxrpc_congestion_degrade(call);
        }

        if (test_and_clear_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events))
                rxrpc_send_initial_ping(call);

        /* Process events */
        if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
                rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                               rxrpc_propose_ack_ping_for_lost_ack);

        if (resend &&
            __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY &&
            !test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
                rxrpc_resend(call, NULL);

        if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
                rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
                               rxrpc_propose_ack_rx_idle);
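
        /* If several packets are awaiting acknowledgement, prefer a PING ACK
         * while we still need RTT samples (or the last sample is stale);
         * otherwise send an IDLE ACK to keep the peer's window moving.
         */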
        if (call->ackr_nr_unacked > 2) {
                if (call->peer->rtt_count < 3)
                        rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                                       rxrpc_propose_ack_ping_for_rtt);
                else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
                                      ktime_get_real()))
                        rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                                       rxrpc_propose_ack_ping_for_old_rtt);
                else
                        rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
                                       rxrpc_propose_ack_input_data);
        }

        /* Make sure the timer is restarted */
        if (!__rxrpc_call_is_complete(call)) {
                ktime_t next = READ_ONCE(call->expect_term_by), delay;

#define set(T) { ktime_t _t = (T); if (ktime_before(_t, next)) next = _t; }

                set(call->expect_req_by);
                set(call->expect_rx_by);
                set(call->delay_ack_at);
                set(call->ack_lost_at);
                set(call->resend_at);
                set(call->keepalive_at);
                set(call->ping_at);
#undef set

                now = ktime_get_real();
                delay = ktime_sub(next, now);
                if (delay <= 0) {
                        rxrpc_poke_call(call, rxrpc_call_poke_timer_now);
                } else {
                        unsigned long nowj = jiffies, delayj, nextj;

                        delayj = max(nsecs_to_jiffies(delay), 1);
                        nextj = nowj + delayj;
                        if (time_before(nextj, call->timer.expires) ||
                            !timer_pending(&call->timer)) {
                                trace_rxrpc_timer_restart(call, delay, delayj);
                                timer_reduce(&call->timer, nextj);
                        }
                }
        }

out:
        if (__rxrpc_call_is_complete(call)) {
                del_timer_sync(&call->timer);
                if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                        rxrpc_disconnect_call(call);
                if (call->security)
                        call->security->free_call_crypto(call);
        }
        if (call->acks_hard_ack != call->tx_bottom)
                rxrpc_shrink_call_tx_buffer(call);
        _leave("");
        return true;

expired:
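        /* If the connection has heard from the peer more recently than this
         * call has, the peer is evidently still there but has stopped
         * responding to the call itself, so report a reset rather than a
         * plain timeout.
         */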
        if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
            (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
                trace_rxrpc_call_reset(call);
                rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
                                 rxrpc_abort_call_reset);
        } else {
                rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
                                 rxrpc_abort_call_timeout);
        }
        goto out;
}