// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);

void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	struct rxrpc_local *local = conn->local;
	bool busy;

	if (WARN_ON_ONCE(!local))
		return;

	spin_lock_bh(&local->lock);
	busy = !list_empty(&conn->attend_link);
	if (!busy) {
		rxrpc_get_connection(conn, why);
		list_add_tail(&conn->attend_link, &local->conn_attend_q);
	}
	spin_unlock_bh(&local->lock);

	rxrpc_wake_up_io_thread(local);
}
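
/* Queuing on the local endpoint's attend queue hands the connection to the
 * I/O thread; the ref taken above is assumed to be consumed when that thread
 * dequeues and services the connection (the dequeue side is not in this
 * file).
 */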

static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
}

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
						gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, rxrpc_process_connection);
		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		INIT_LIST_HEAD(&conn->attend_link);
		mutex_init(&conn->security_lock);
		mutex_init(&conn->tx_data_alloc_lock);
		skb_queue_head_init(&conn->rx_queue);
		conn->rxnet = rxnet;
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}
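
/* kzalloc() zero-fills the structure, so every field not explicitly set
 * above starts out at zero - including conn->ref, which the caller is
 * presumably expected to set before publishing the connection (the callers
 * live outside this file).
 */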

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is
 * taken.  NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							  struct sockaddr_rxrpc *srx,
							  struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their
	 * IDs are unique for this machine.
	 */
	conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
	if (!conn || refcount_read(&conn->ref) == 0) {
		_debug("no conn");
		goto not_found;
	}

	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}
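
/* A minimal sketch of how the lookup above is meant to be used (hypothetical
 * caller; local, srx, skb and the trace reason "why" are stand-ins):
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_client_connection_rcu(local, &srx, skb);
 *	if (conn)
 *		conn = rxrpc_get_connection_maybe(conn, why);
 *	rcu_read_unlock();
 *
 * The refcount_read() check in the lookup filters out connections whose last
 * ref has already been dropped but that RCU has not yet freed; actually
 * keeping the connection still requires rxrpc_get_connection_maybe() before
 * rcu_read_unlock().
 */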

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (chan->call == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;
		chan->call = NULL;
	}

	_leave("");
}
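
/* The snapshot taken above (last_type/last_abort/last_seq plus the call ID
 * rotation into last_call) should let the connection regenerate the call's
 * terminal ACK or ABORT from the channel record alone if the peer
 * retransmits after the call record itself is gone.
 */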

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	rxrpc_see_call(call, rxrpc_call_see_disconnected);

	call->peer->cong_ssthresh = call->cong_ssthresh;

	if (!hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	if (rxrpc_is_client_call(call)) {
		rxrpc_disconnect_client_call(call->bundle, call);
	} else {
		__rxrpc_disconnect_call(conn, call);
		conn->idle_timestamp = jiffies;
		if (atomic_dec_and_test(&conn->active))
			rxrpc_set_service_reap_timer(conn->rxnet,
						     jiffies + rxrpc_connection_expiry * HZ);
	}

	rxrpc_put_call(call, rxrpc_call_put_io_thread);
}
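
/* Note that rxrpc_disconnect_call() writes call->cong_ssthresh back to the
 * peer, so the next call to the same peer starts from the slow-start
 * threshold this call learned rather than from scratch.
 */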

/*
 * Queue a connection's work processor.  The connection is only queued while
 * it is still active (->active >= 0); no extra ref is taken here, the trace
 * merely notes the connection.
 */
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	if (atomic_read(&conn->active) >= 0 &&
	    rxrpc_queue_work(&conn->processor))
		rxrpc_see_connection(conn, why);
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	if (conn) {
		int r = refcount_read(&conn->ref);

		trace_rxrpc_conn(conn->debug_id, r, why);
	}
}

/*
 * Get a ref on a connection.
 */
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
					      enum rxrpc_conn_trace why)
{
	int r;

	__refcount_inc(&conn->ref, &r);
	trace_rxrpc_conn(conn->debug_id, r + 1, why);
	return conn;
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
			   enum rxrpc_conn_trace why)
{
	int r;

	if (conn) {
		if (__refcount_inc_not_zero(&conn->ref, &r))
			trace_rxrpc_conn(conn->debug_id, r + 1, why);
		else
			conn = NULL;
	}
	return conn;
}
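
/* __refcount_inc_not_zero() fails once the count has reached zero, so a
 * caller that found the connection under RCU (see
 * rxrpc_find_client_connection_rcu() above) gets NULL here instead of
 * resurrecting an object that is already on its way to the RCU free.
 */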

/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}
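
/* timer_reduce() only moves a pending timer's expiry earlier (or starts an
 * inactive one), so competing callers of the function above converge on the
 * soonest requested reap time.
 */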

/*
 * destroy a virtual connection
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}

/*
 * Clean up a dead connection.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	ASSERT(!conn->channels[0].call &&
	       !conn->channels[1].call &&
	       !conn->channels[2].call &&
	       !conn->channels[3].call);
	ASSERT(list_empty(&conn->cache_link));

	del_timer_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	del_timer_sync(&conn->timer);

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_kill_client_conn(conn);

	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	if (conn->tx_data_alloc.va)
		__page_frag_cache_drain(virt_to_page(conn->tx_data_alloc.va),
					conn->tx_data_alloc.pagecnt_bias);
	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
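
/* Cleanup is therefore split in two: everything above runs in process
 * context and may sleep (cancel_work_sync(), the security clear, the ref
 * puts), while the final kfree() is deferred through call_rcu() so that RCU
 * readers performing lookups can never touch freed memory.
 */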

/*
 * Drop a ref on a connection.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		del_timer(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			rxrpc_clean_up_connection(&conn->destructor);
	}
}
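
/* Note that debug_id is snapshotted before the decrement above: if this
 * wasn't the last ref, another CPU may drop its ref and free the connection
 * at any moment afterwards, so conn must not be dereferenced again merely to
 * emit the tracepoint.
 */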

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}
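
/* Two details of the reaper worth noting: the cmpxchg from 0 to -1 loses
 * the race against any path that bumps ->active, so a connection that has
 * just been reused is skipped; and reaped connections are collected on a
 * private graveyard list so that their refs are only dropped after
 * rxnet->conn_lock has been released.
 */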

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	atomic_dec(&rxnet->nr_conns);
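
	/* The decrement above is assumed to balance an initial bias on
	 * rxnet->nr_conns set when the namespace was created, so that the
	 * count can only fall to zero during namespace teardown.
	 */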

	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}