conn_event.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* connection-level event handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Set the completion state on an aborted connection.
 */
static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb,
				   s32 abort_code, int err,
				   enum rxrpc_call_completion compl)
{
	bool aborted = false;

	/* Check without the lock first and only take the lock if the
	 * connection hasn't already been marked aborted; recheck under the
	 * lock before recording the abort.
	 */
	if (conn->state != RXRPC_CONN_ABORTED) {
		spin_lock(&conn->state_lock);
		if (conn->state != RXRPC_CONN_ABORTED) {
			conn->abort_code = abort_code;
			conn->error = err;
			conn->completion = compl;
			/* Order the abort info before the state change. */
			smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
			set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
			aborted = true;
		}
		spin_unlock(&conn->state_lock);
	}

	return aborted;
}

/*
 * Mark a socket buffer to indicate that the connection it's on should be aborted.
 */
int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
		     s32 abort_code, int err, enum rxrpc_abort_reason why)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	if (rxrpc_set_conn_aborted(conn, skb, abort_code, err,
				   RXRPC_CALL_LOCALLY_ABORTED)) {
		trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, abort_code, err);
		rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort);
	}
	return -EPROTO;
}

/*
 * Mark a connection as being remotely aborted.
 */
static void rxrpc_input_conn_abort(struct rxrpc_connection *conn,
				   struct sk_buff *skb)
{
	trace_rxrpc_rx_conn_abort(conn, skb);
	rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
			       RXRPC_CALL_REMOTELY_ABORTED);
}

/*
 * Retransmit terminal ACK or ABORT of the previous call.
 */
void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
				struct sk_buff *skb,
				unsigned int channel)
{
	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
	struct rxrpc_channel *chan;
	struct msghdr msg;
	struct kvec iov[3];
	struct {
		struct rxrpc_wire_header whdr;
		union {
			__be32 abort_code;
			struct rxrpc_ackpacket ack;
		};
	} __attribute__((packed)) pkt;
	struct rxrpc_acktrailer trailer;
	size_t len;
	int ret, ioc;
	u32 serial, mtu, call_id, padding;

	_enter("%d", conn->debug_id);

	/* Don't bother retransmitting in response to a ping-response ACK. */
	if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &pkt.ack, sizeof(pkt.ack)) < 0)
			return;
		if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
			return;
	}

	chan = &conn->channels[channel];

	/* If the last call got moved on whilst we were waiting to run, just
	 * ignore this packet.
	 */
	call_id = chan->last_call;
	if (skb && call_id != sp->hdr.callNumber)
		return;

	msg.msg_name = &conn->peer->srx.transport;
	msg.msg_namelen = conn->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;
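
	/* The retransmitted packet is assembled from up to three iovecs: the
	 * wire header plus either the abort code or the ACK body in iov[0],
	 * then, for an ACK only, three bytes of padding and the ACK trailer.
	 */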
	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt.whdr);
	iov[1].iov_base = &padding;
	iov[1].iov_len = 3;
	iov[2].iov_base = &trailer;
	iov[2].iov_len = sizeof(trailer);

	serial = rxrpc_get_next_serial(conn);

	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(conn->proto.cid | channel);
	pkt.whdr.callNumber = htonl(call_id);
	pkt.whdr.serial = htonl(serial);
	pkt.whdr.seq = 0;
	pkt.whdr.type = chan->last_type;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = conn->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(conn->service_id);

	len = sizeof(pkt.whdr);
	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		pkt.abort_code = htonl(chan->last_abort);
		iov[0].iov_len += sizeof(pkt.abort_code);
		len += sizeof(pkt.abort_code);
		ioc = 1;
		break;

	case RXRPC_PACKET_TYPE_ACK:
		mtu = conn->peer->if_mtu;
		mtu -= conn->peer->hdrsize;
		pkt.ack.bufferSpace = 0;
		pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
		pkt.ack.firstPacket = htonl(chan->last_seq + 1);
		pkt.ack.previousPacket = htonl(chan->last_seq);
		pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
		pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
		pkt.ack.nAcks = 0;
		trailer.maxMTU = htonl(rxrpc_rx_mtu);
		trailer.ifMTU = htonl(mtu);
		trailer.rwind = htonl(rxrpc_rx_window_size);
		trailer.jumbo_max = htonl(rxrpc_rx_jumbo_max);
		pkt.whdr.flags |= RXRPC_SLOW_START_OK;
		padding = 0;
		iov[0].iov_len += sizeof(pkt.ack);
		len += sizeof(pkt.ack) + 3 + sizeof(trailer);
		ioc = 3;

		trace_rxrpc_tx_ack(chan->call_debug_id, serial,
				   ntohl(pkt.ack.firstPacket),
				   ntohl(pkt.ack.serial),
				   pkt.ack.reason, 0, rxrpc_rx_window_size);
		break;

	default:
		return;
	}
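
	/* Send the reconstructed terminal packet on the local endpoint's
	 * transport socket.
	 */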
	ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
	conn->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
				    rxrpc_tx_point_call_final_resend);
	else
		trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_final_resend);

	_leave("");
}

/*
 * pass a connection-level abort onto all calls on that connection
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn)
{
	struct rxrpc_call *call;
	int i;

	_enter("{%d},%x", conn->debug_id, conn->abort_code);

	for (i = 0; i < RXRPC_MAXCALLS; i++) {
		call = conn->channels[i].call;
		if (call) {
			rxrpc_see_call(call, rxrpc_call_see_conn_abort);
			rxrpc_set_call_completion(call,
						  conn->completion,
						  conn->abort_code,
						  conn->error);
			rxrpc_poke_call(call, rxrpc_call_poke_conn_abort);
		}
	}

	_leave("");
}

/*
 * mark a call as being on a now-secured channel
 * - must be called with BH's disabled.
 */
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
	if (call && __test_and_clear_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
		rxrpc_notify_socket(call);
}

/*
 * connection-level Rx packet processor
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	int ret;

	if (conn->state == RXRPC_CONN_ABORTED)
		return -ECONNABORTED;

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_CHALLENGE:
		return conn->security->respond_to_challenge(conn, skb);

	case RXRPC_PACKET_TYPE_RESPONSE:
		ret = conn->security->verify_response(conn, skb);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(
			conn, conn->key->payload.data[0]);
		if (ret < 0)
			return ret;

		spin_lock(&conn->state_lock);
		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
			conn->state = RXRPC_CONN_SERVICE;
		spin_unlock(&conn->state_lock);

		if (conn->state == RXRPC_CONN_SERVICE) {
			/* Offload call state flipping to the I/O thread.  As
			 * we've already received the packet, put it on the
			 * front of the queue.
			 */
			sp->conn = rxrpc_get_connection(conn, rxrpc_conn_get_poke_secured);
			skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
			rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
			skb_queue_head(&conn->local->rx_queue, skb);
			rxrpc_wake_up_io_thread(conn->local);
		}
		return 0;

	default:
		WARN_ON_ONCE(1);
		return -EPROTO;
	}
}

/*
 * set up security and issue a challenge
 */
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
	if (conn->security->issue_challenge(conn) < 0)
		rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM,
				 rxrpc_abort_nomem);
}

/*
 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
 */
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
{
	unsigned long j = jiffies, next_j;
	unsigned int channel;
	bool set;

again:
	next_j = j + LONG_MAX;
	set = false;
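
	/* Scan the channels for final ACKs that are due (or are being forced
	 * out) and note the earliest one that isn't yet due so that the
	 * connection timer can be shortened to match.
	 */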
	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
		struct rxrpc_channel *chan = &conn->channels[channel];
		unsigned long ack_at;

		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
			continue;

		ack_at = chan->final_ack_at;
		if (time_before(j, ack_at) && !force) {
			if (time_before(ack_at, next_j)) {
				next_j = ack_at;
				set = true;
			}
			continue;
		}

		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
				       &conn->flags))
			rxrpc_conn_retransmit_call(conn, NULL, channel);
	}

	j = jiffies;
	if (time_before_eq(next_j, j))
		goto again;

	if (set)
		rxrpc_reduce_conn_timer(conn, next_j);
}

/*
 * connection-level event processor
 */
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
	struct sk_buff *skb;
	int ret;

	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
		rxrpc_secure_connection(conn);

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
		ret = rxrpc_process_event(conn, skb);
		switch (ret) {
		case -ENOMEM:
		case -EAGAIN:
			skb_queue_head(&conn->rx_queue, skb);
			rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
			break;
		default:
			rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
			break;
		}
	}
}

void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);

	rxrpc_see_connection(conn, rxrpc_conn_see_work);

	if (__rxrpc_use_local(conn->local, rxrpc_local_use_conn_work)) {
		rxrpc_do_process_connection(conn);
		rxrpc_unuse_local(conn->local, rxrpc_local_unuse_conn_work);
	}
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	rxrpc_get_skb(skb, rxrpc_skb_get_conn_work);
	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn, rxrpc_conn_queue_rx_work);
}

/*
 * Input a connection-level packet.
 */
bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets for now. */
		return true;

	case RXRPC_PACKET_TYPE_ABORT:
		if (rxrpc_is_conn_aborted(conn))
			return true;
		rxrpc_input_conn_abort(conn, skb);
		rxrpc_abort_calls(conn);
		return true;
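
	/* CHALLENGE and RESPONSE packets on an already-aborted connection are
	 * discarded (resending our abort if the abort was generated locally);
	 * otherwise they're queued for the connection processor.
	 */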
	case RXRPC_PACKET_TYPE_CHALLENGE:
	case RXRPC_PACKET_TYPE_RESPONSE:
		if (rxrpc_is_conn_aborted(conn)) {
			if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
				rxrpc_send_conn_abort(conn);
			return true;
		}
		rxrpc_post_packet_to_conn(conn, skb);
		return true;

	default:
		WARN_ON_ONCE(1);
		return true;
	}
}

/*
 * Input a connection event.
 */
void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
{
	unsigned int loop;

	if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
		rxrpc_abort_calls(conn);

	if (skb) {
		switch (skb->mark) {
		case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
			if (conn->state != RXRPC_CONN_SERVICE)
				break;

			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(conn->channels[loop].call);
			break;
		}
	}

	/* Process delayed ACKs whose time has come. */
	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, false);
}