/* net/rxrpc/af_rxrpc.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* AF_RXRPC implementation
  3. *
  4. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells (dhowells@redhat.com)
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/module.h>
  9. #include <linux/kernel.h>
  10. #include <linux/net.h>
  11. #include <linux/slab.h>
  12. #include <linux/skbuff.h>
  13. #include <linux/random.h>
  14. #include <linux/poll.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/key-type.h>
  17. #include <net/net_namespace.h>
  18. #include <net/sock.h>
  19. #include <net/af_rxrpc.h>
  20. #define CREATE_TRACE_POINTS
  21. #include "ar-internal.h"
MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

/* Debugging mask; adjustable at runtime via the "debug" module parameter. */
unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

/* Defined at the bottom of this file; forward-declared for rxrpc_create(). */
static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* current debugging ID */
atomic_t rxrpc_debug_id;
EXPORT_SYMBOL(rxrpc_debug_id);

/* count of skbs currently in use */
atomic_t rxrpc_n_rx_skbs;

/* Ordered workqueue for protocol housekeeping work. */
struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);
  38. /*
  39. * see if an RxRPC socket is currently writable
  40. */
  41. static inline int rxrpc_writable(struct sock *sk)
  42. {
  43. return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
  44. }
/*
 * wait for write bufferage to become available
 *
 * Called as sk->sk_write_space when transmit memory is released.  Wakes any
 * poll/select sleepers and kicks async notification.  sk_wq is
 * RCU-protected, so the whole wakeup runs inside an RCU read lock.
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
/*
 * validate an RxRPC address
 *
 * Checks that @srx (of @len bytes) is a well-formed sockaddr_rxrpc whose
 * transport family is compatible with the socket's family.  On success the
 * unused tail of the structure is zeroed so that whole-struct memcmp()
 * comparisons (e.g. in rxrpc_bind()) are meaningful.
 *
 * Returns 0 on success or a negative errno.
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	unsigned int tail;

	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	/* The transport address must fit within the declared length. */
	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	switch (srx->transport.family) {
	case AF_INET:
		/* An IPv6 socket may carry IPv4 addresses; not vice versa. */
		if (rx->family != AF_INET &&
		    rx->family != AF_INET6)
			return -EAFNOSUPPORT;
		if (srx->transport_len < sizeof(struct sockaddr_in))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (rx->family != AF_INET6)
			return -EAFNOSUPPORT;
		if (srx->transport_len < sizeof(struct sockaddr_in6))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport) +
			sizeof(struct sockaddr_in6);
		break;
#endif

	default:
		return -EAFNOSUPPORT;
	}

	/* Zero everything beyond the meaningful part of the address. */
	if (tail < len)
		memset((void *)srx + tail, 0, len - tail);
	_debug("INET: %pISp", &srx->transport);
	return 0;
}
/*
 * bind a local address to an RxRPC socket
 *
 * An unbound socket may be bound as a client (srx_service == 0) or as a
 * service.  A socket already bound as a service may be bound a second time
 * to the same transport address with a different service ID
 * (RXRPC_SERVER_BOUND2), allowing it to serve two service IDs at once.
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;
	service_id = srx->srx_service;

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			/* Claim the service slot on the local endpoint; only
			 * one service socket may use a given endpoint.
			 */
			write_lock(&local->services_lock);
			if (local->service)
				goto service_in_use;
			rx->local = local;
			local->service = rx;
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		/* Second bind: must name a new, distinct service ID on an
		 * otherwise identical address.
		 */
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	/* Another socket owns the service slot; drop the refs that
	 * rxrpc_lookup_local() gave us.
	 */
	write_unlock(&local->services_lock);
	rxrpc_unuse_local(local, rxrpc_local_unuse_bind);
	rxrpc_put_local(local, rxrpc_local_put_bind);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
/*
 * set the number of pending calls permitted on a listening socket
 *
 * The socket must already be bound as a service.  A backlog of INT_MAX
 * selects the system maximum (rxrpc_max_backlog).  On an already-listening
 * socket, backlog 0 disables listening and discards preallocated calls.
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	unsigned int max, old;
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		/* Can't listen until bound to a local service address. */
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
		ASSERT(rx->local != NULL);
		max = READ_ONCE(rxrpc_max_backlog);
		ret = -EINVAL;
		if (backlog == INT_MAX)
			backlog = max;	/* INT_MAX means "as many as allowed" */
		else if (backlog < 0 || backlog > max)
			break;
		old = sk->sk_max_ack_backlog;
		sk->sk_max_ack_backlog = backlog;
		ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
		if (ret == 0)
			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		else
			sk->sk_max_ack_backlog = old;	/* roll back on failure */
		break;
	case RXRPC_SERVER_LISTENING:
		if (backlog == 0) {
			/* Disable listening and drop preallocated calls. */
			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
			sk->sk_max_ack_backlog = 0;
			rxrpc_discard_prealloc(rx);
			ret = 0;
			break;
		}
		fallthrough;
	default:
		ret = -EBUSY;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}
  223. /**
  224. * rxrpc_kernel_lookup_peer - Obtain remote transport endpoint for an address
  225. * @sock: The socket through which it will be accessed
  226. * @srx: The network address
  227. * @gfp: Allocation flags
  228. *
  229. * Lookup or create a remote transport endpoint record for the specified
  230. * address and return it with a ref held.
  231. */
  232. struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
  233. struct sockaddr_rxrpc *srx, gfp_t gfp)
  234. {
  235. struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
  236. int ret;
  237. ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
  238. if (ret < 0)
  239. return ERR_PTR(ret);
  240. return rxrpc_lookup_peer(rx->local, srx, gfp);
  241. }
  242. EXPORT_SYMBOL(rxrpc_kernel_lookup_peer);
  243. /**
  244. * rxrpc_kernel_get_peer - Get a reference on a peer
  245. * @peer: The peer to get a reference on.
  246. *
  247. * Get a record for the remote peer in a call.
  248. */
  249. struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer)
  250. {
  251. return peer ? rxrpc_get_peer(peer, rxrpc_peer_get_application) : NULL;
  252. }
  253. EXPORT_SYMBOL(rxrpc_kernel_get_peer);
/**
 * rxrpc_kernel_put_peer - Allow a kernel app to drop a peer reference
 * @peer: The peer to drop a ref on
 *
 * Releases one application-held reference obtained via
 * rxrpc_kernel_lookup_peer() or rxrpc_kernel_get_peer().
 */
void rxrpc_kernel_put_peer(struct rxrpc_peer *peer)
{
	rxrpc_put_peer(peer, rxrpc_peer_put_application);
}
EXPORT_SYMBOL(rxrpc_kernel_put_peer);
/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @peer: The peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @tx_total_len: Total length of data to transmit during the call (or -1)
 * @hard_timeout: The maximum lifespan of the call in sec
 * @gfp: The allocation constraints
 * @notify_rx: Where to send notifications instead of socket queue
 * @service_id: The ID of the service to contact
 * @upgrade: Request service upgrade for call
 * @interruptibility: The call is interruptible, or can be canceled.
 * @debug_id: The debug ID for tracing to be assigned to the call
 *
 * Allow a kernel service to begin a call on the nominated socket. This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate. The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @peer and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct rxrpc_peer *peer,
					   struct key *key,
					   unsigned long user_call_ID,
					   s64 tx_total_len,
					   u32 hard_timeout,
					   gfp_t gfp,
					   rxrpc_notify_rx_t notify_rx,
					   u16 service_id,
					   bool upgrade,
					   enum rxrpc_interruptibility interruptibility,
					   unsigned int debug_id)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call_params p;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	/* The peer must belong to the same local endpoint as the socket. */
	if (WARN_ON_ONCE(peer->local != rx->local))
		return ERR_PTR(-EIO);

	lock_sock(&rx->sk);

	/* Fall back to the socket's key; a key with no payload means
	 * "no security".
	 */
	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&p, 0, sizeof(p));
	p.user_call_ID = user_call_ID;
	p.tx_total_len = tx_total_len;
	p.interruptibility = interruptibility;
	p.kernel = true;
	p.timeouts.hard = hard_timeout;

	memset(&cp, 0, sizeof(cp));
	cp.local = rx->local;
	cp.peer = peer;
	cp.key = key;
	cp.security_level = rx->min_sec_level;
	cp.exclusive = false;
	cp.upgrade = upgrade;
	cp.service_id = service_id;
	call = rxrpc_new_client_call(rx, &cp, &p, gfp, debug_id);
	/* The socket has been unlocked. */
	if (!IS_ERR(call)) {
		call->notify_rx = notify_rx;
		/* rxrpc_new_client_call() returns with user_mutex held. */
		mutex_unlock(&call->user_mutex);
	}

	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
/*
 * Dummy function used to stop the notifier talking to recvmsg().
 *
 * Installed as call->notify_rx when a kernel service shuts a call down so
 * that no further notifications reach the (possibly defunct) service.
 */
static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				  unsigned long call_user_ID)
{
}
/**
 * rxrpc_kernel_shutdown_call - Allow a kernel service to shut down a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to shut down a call it was using. The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, refcount_read(&call->ref));

	mutex_lock(&call->user_mutex);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
		rxrpc_release_call(rxrpc_sk(sock->sk), call);

		/* Make sure we're not going to call back into a kernel service */
		if (call->notify_rx) {
			spin_lock(&call->notify_lock);
			call->notify_rx = rxrpc_dummy_notify_rx;
			spin_unlock(&call->notify_lock);
		}
	}
	mutex_unlock(&call->user_mutex);
}
EXPORT_SYMBOL(rxrpc_kernel_shutdown_call);
/**
 * rxrpc_kernel_put_call - Release a reference to a call
 * @sock: The socket the call is on
 * @call: The call to put
 *
 * Drop the application's ref on an rxrpc call.
 */
void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call)
{
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_put_call);
  377. /**
  378. * rxrpc_kernel_check_life - Check to see whether a call is still alive
  379. * @sock: The socket the call is on
  380. * @call: The call to check
  381. *
  382. * Allow a kernel service to find out whether a call is still alive - whether
  383. * it has completed successfully and all received data has been consumed.
  384. */
  385. bool rxrpc_kernel_check_life(const struct socket *sock,
  386. const struct rxrpc_call *call)
  387. {
  388. if (!rxrpc_call_is_complete(call))
  389. return true;
  390. if (call->completion != RXRPC_CALL_SUCCEEDED)
  391. return false;
  392. return !skb_queue_empty(&call->recvmsg_queue);
  393. }
  394. EXPORT_SYMBOL(rxrpc_kernel_check_life);
/**
 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
 * @sock: The socket the call is on
 * @call: The call to query
 *
 * Allow a kernel service to retrieve the epoch value from a service call to
 * see if the client at the other end rebooted.
 */
u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
{
	/* NOTE(review): assumes call->conn is non-NULL; callers must only use
	 * this on a call with an established connection.
	 */
	return call->conn->proto.epoch;
}
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
/**
 * rxrpc_kernel_new_call_notification - Get notifications of new calls
 * @sock: The socket to intercept received messages on
 * @notify_new_call: Function to be called when new calls appear
 * @discard_new_call: Function to discard preallocated calls
 *
 * Allow a kernel service to be given notifications about new calls.
 */
void rxrpc_kernel_new_call_notification(
	struct socket *sock,
	rxrpc_notify_new_call_t notify_new_call,
	rxrpc_discard_new_call_t discard_new_call)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	/* Plain stores; presumably set before the socket starts accepting
	 * calls — no locking visible here.
	 */
	rx->notify_new_call = notify_new_call;
	rx->discard_new_call = discard_new_call;
}
EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
/**
 * rxrpc_kernel_set_max_life - Set maximum lifespan on a call
 * @sock: The socket the call is on
 * @call: The call to configure
 * @hard_timeout: The maximum lifespan of the call in ms
 *
 * Set the maximum lifespan of a call. The call will end with ETIME or
 * ETIMEDOUT if it takes longer than this.
 */
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
			       unsigned long hard_timeout)
{
	ktime_t delay = ms_to_ktime(hard_timeout), expect_term_by;

	mutex_lock(&call->user_mutex);

	/* Set the absolute deadline and poke the call's event loop so the
	 * timer gets rearmed.
	 */
	expect_term_by = ktime_add(ktime_get_real(), delay);
	WRITE_ONCE(call->expect_term_by, expect_term_by);
	trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
	rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);

	mutex_unlock(&call->user_mutex);
}
EXPORT_SYMBOL(rxrpc_kernel_set_max_life);
/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	/* A destination may only be set once. */
	ret = -EISCONN;
	if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
		goto error;

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		break;
	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		break;
	default:
		/* Server sockets can't be connected. */
		ret = -EBUSY;
		goto error;
	}

	/* Record the default destination used when sendmsg() supplies no
	 * address (see rxrpc_sendmsg()).
	 */
	rx->connect_srx = *srx;
	set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
	ret = 0;

error:
	release_sock(&rx->sk);
	return ret;
}
/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	lock_sock(&rx->sk);

	/* State machine with deliberate fallthroughs: an unbound socket is
	 * implicitly bound to a wildcard local address, then treated as
	 * client-bound, then the send proceeds.
	 */
	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
	case RXRPC_CLIENT_UNBOUND:
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = SOCK_DGRAM;
		rx->srx.transport.family = rx->family;
		switch (rx->family) {
		case AF_INET:
			rx->srx.transport_len = sizeof(struct sockaddr_in);
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			rx->srx.transport_len = sizeof(struct sockaddr_in6);
			break;
#endif
		default:
			ret = -EAFNOSUPPORT;
			goto error_unlock;
		}
		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		fallthrough;

	case RXRPC_CLIENT_BOUND:
		/* No explicit destination: fall back to the address recorded
		 * by connect(), if any.
		 */
		if (!m->msg_name &&
		    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
			m->msg_name = &rx->connect_srx;
			m->msg_namelen = sizeof(rx->connect_srx);
		}
		fallthrough;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_LISTENING:
		ret = rxrpc_do_sendmsg(rx, m, len);
		/* The socket has been unlocked */
		goto out;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

error_unlock:
	release_sock(&rx->sk);
out:
	_leave(" = %d", ret);
	return ret;
}
/*
 * Set the minimum security level for the socket (kernel-service API).
 * Only permitted while the socket is still unbound.
 */
int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val)
{
	/* NOTE(review): sk_state is checked before lock_sock() is taken —
	 * presumably acceptable because kernel callers configure the socket
	 * before use; confirm against callers.
	 */
	if (sk->sk_state != RXRPC_UNBOUND)
		return -EISCONN;
	if (val > RXRPC_SECURITY_MAX)
		return -EINVAL;
	lock_sock(sk);
	rxrpc_sk(sk)->min_sec_level = val;
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(rxrpc_sock_set_min_security_level);
/*
 * set RxRPC socket options
 *
 * Most options may only be set while the socket is unbound.  Control flow:
 * "goto success" sets ret to 0 and releases the lock; "goto error" releases
 * the lock and returns whatever is currently in ret (which may be a result
 * from a helper such as rxrpc_request_key()).
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned int min_sec_level;
	u16 service_upgrade[2];
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			/* Flag option: takes no value. */
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			rx->exclusive = true;
			goto success;

		case RXRPC_SECURITY_KEY:
			/* Only one key may be set, and only before binding. */
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;	/* propagate the helper's result */

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;	/* propagate the helper's result */

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned int))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = copy_safe_from_sockptr(&min_sec_level,
						     sizeof(min_sec_level),
						     optval, optlen);
			if (ret)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		case RXRPC_UPGRADEABLE_SERVICE:
			/* Designate the from/to service ID pair; the pair must
			 * match the socket's two bound services in either
			 * order, and may only be set once.
			 */
			ret = -EINVAL;
			if (optlen != sizeof(service_upgrade) ||
			    rx->service_upgrade.from != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
				goto error;
			ret = -EFAULT;
			if (copy_from_sockptr(service_upgrade, optval,
					      sizeof(service_upgrade)) != 0)
				goto error;
			ret = -EINVAL;
			if ((service_upgrade[0] != rx->srx.srx_service ||
			     service_upgrade[1] != rx->second_service) &&
			    (service_upgrade[0] != rx->second_service ||
			     service_upgrade[1] != rx->srx.srx_service))
				goto error;
			rx->service_upgrade.from = service_upgrade[0];
			rx->service_upgrade.to = service_upgrade[1];
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}
  663. /*
  664. * Get socket options.
  665. */
  666. static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
  667. char __user *optval, int __user *_optlen)
  668. {
  669. int optlen;
  670. if (level != SOL_RXRPC)
  671. return -EOPNOTSUPP;
  672. if (get_user(optlen, _optlen))
  673. return -EFAULT;
  674. switch (optname) {
  675. case RXRPC_SUPPORTED_CMSG:
  676. if (optlen < sizeof(int))
  677. return -ETOOSMALL;
  678. if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
  679. put_user(sizeof(int), _optlen))
  680. return -EFAULT;
  681. return 0;
  682. default:
  683. return -EOPNOTSUPP;
  684. }
  685. }
  686. /*
  687. * permit an RxRPC socket to be polled
  688. */
  689. static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
  690. poll_table *wait)
  691. {
  692. struct sock *sk = sock->sk;
  693. struct rxrpc_sock *rx = rxrpc_sk(sk);
  694. __poll_t mask;
  695. sock_poll_wait(file, sock, wait);
  696. mask = 0;
  697. /* the socket is readable if there are any messages waiting on the Rx
  698. * queue */
  699. if (!list_empty(&rx->recvmsg_q))
  700. mask |= EPOLLIN | EPOLLRDNORM;
  701. /* the socket is writable if there is space to add new data to the
  702. * socket; there is no guarantee that any particular call in progress
  703. * on the socket may have space in the Tx ACK window */
  704. if (rxrpc_writable(sk))
  705. mask |= EPOLLOUT | EPOLLWRNORM;
  706. return mask;
  707. }
/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_net *rxnet;
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP/UDP6 only */
	/* NOTE(review): with CONFIG_AF_RXRPC_IPV6 disabled this condition is
	 * always false, so no protocol is rejected here; presumably invalid
	 * families are caught later by address validation — confirm.
	 */
	if (protocol != PF_INET &&
	    IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_state = RXRPC_UNBOUND;
	sk->sk_write_space = rxrpc_write_space;
	sk->sk_max_ack_backlog = 0;
	sk->sk_destruct = rxrpc_sock_destructor;

	/* Initialise the rxrpc-specific part of the socket. */
	rx = rxrpc_sk(sk);
	rx->family = protocol;
	rx->calls = RB_ROOT;

	spin_lock_init(&rx->incoming_lock);
	INIT_LIST_HEAD(&rx->sock_calls);
	INIT_LIST_HEAD(&rx->to_be_accepted);
	INIT_LIST_HEAD(&rx->recvmsg_q);
	spin_lock_init(&rx->recvmsg_lock);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	/* Kick the per-net peer keepalive timer soon. */
	rxnet = rxrpc_net(sock_net(&rx->sk));
	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

	_leave(" = 0 [%p]", rx);
	return 0;
}
/*
 * Kill all the calls on a socket and shut it down.
 */
static int rxrpc_shutdown(struct socket *sock, int flags)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret = 0;

	_enter("%p,%d", sk, flags);

	/* Only a full shutdown is supported. */
	if (flags != SHUT_RDWR)
		return -EOPNOTSUPP;
	if (sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	lock_sock(sk);

	/* Re-check under the lock; another thread may have raced us here. */
	if (sk->sk_state < RXRPC_CLOSE) {
		sk->sk_state = RXRPC_CLOSE;
		sk->sk_shutdown = SHUTDOWN_MASK;
	} else {
		ret = -ESHUTDOWN;
	}

	/* Discarded unconditionally, even on the -ESHUTDOWN path. */
	rxrpc_discard_prealloc(rx);

	release_sock(sk);
	return ret;
}
  774. /*
  775. * RxRPC socket destructor
  776. */
  777. static void rxrpc_sock_destructor(struct sock *sk)
  778. {
  779. _enter("%p", sk);
  780. rxrpc_purge_queue(&sk->sk_receive_queue);
  781. WARN_ON(refcount_read(&sk->sk_wmem_alloc));
  782. WARN_ON(!sk_unhashed(sk));
  783. WARN_ON(sk->sk_socket);
  784. if (!sock_flag(sk, SOCK_DEAD)) {
  785. printk("Attempt to release alive rxrpc socket: %p\n", sk);
  786. return;
  787. }
  788. }
/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	sk->sk_state = RXRPC_CLOSE;

	/* Detach from the local endpoint's service slot if we own it. */
	if (rx->local && rx->local->service == rx) {
		write_lock(&rx->local->services_lock);
		rx->local->service = NULL;
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	/* Drop the endpoint use-count and ref taken at bind time. */
	rxrpc_unuse_local(rx->local, rxrpc_local_unuse_release_sock);
	rxrpc_put_local(rx->local, rxrpc_local_put_release_sock);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}
  833. /*
  834. * release an RxRPC BSD socket on close() or equivalent
  835. */
  836. static int rxrpc_release(struct socket *sock)
  837. {
  838. struct sock *sk = sock->sk;
  839. _enter("%p{%p}", sock, sk);
  840. if (!sk)
  841. return 0;
  842. sock->sk = NULL;
  843. return rxrpc_release_sock(sk);
  844. }
/*
 * RxRPC network protocol
 *
 * Socket-layer operations table for PF_RXRPC sockets.  Operations that
 * have no meaning for RxRPC (socketpair, accept, getname, ioctl, mmap)
 * are wired to the generic sock_no_*() rejection stubs.
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= rxrpc_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= rxrpc_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
};
/* Protocol descriptor: sizes each struct sock allocation to hold a full
 * struct rxrpc_sock and reserves header room for the RxRPC wire header.
 */
static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_wire_header),
};
/* Address-family descriptor: routes socket(PF_RXRPC, ...) to rxrpc_create(). */
static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create	= rxrpc_create,
	.owner	= THIS_MODULE,
};
  878. /*
  879. * initialise and register the RxRPC protocol
  880. */
  881. static int __init af_rxrpc_init(void)
  882. {
  883. int ret = -1;
  884. BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
  885. ret = -ENOMEM;
  886. rxrpc_gen_version_string();
  887. rxrpc_call_jar = kmem_cache_create(
  888. "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
  889. SLAB_HWCACHE_ALIGN, NULL);
  890. if (!rxrpc_call_jar) {
  891. pr_notice("Failed to allocate call jar\n");
  892. goto error_call_jar;
  893. }
  894. rxrpc_workqueue = alloc_ordered_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM);
  895. if (!rxrpc_workqueue) {
  896. pr_notice("Failed to allocate work queue\n");
  897. goto error_work_queue;
  898. }
  899. ret = rxrpc_init_security();
  900. if (ret < 0) {
  901. pr_crit("Cannot initialise security\n");
  902. goto error_security;
  903. }
  904. ret = register_pernet_device(&rxrpc_net_ops);
  905. if (ret)
  906. goto error_pernet;
  907. ret = proto_register(&rxrpc_proto, 1);
  908. if (ret < 0) {
  909. pr_crit("Cannot register protocol\n");
  910. goto error_proto;
  911. }
  912. ret = sock_register(&rxrpc_family_ops);
  913. if (ret < 0) {
  914. pr_crit("Cannot register socket family\n");
  915. goto error_sock;
  916. }
  917. ret = register_key_type(&key_type_rxrpc);
  918. if (ret < 0) {
  919. pr_crit("Cannot register client key type\n");
  920. goto error_key_type;
  921. }
  922. ret = register_key_type(&key_type_rxrpc_s);
  923. if (ret < 0) {
  924. pr_crit("Cannot register server key type\n");
  925. goto error_key_type_s;
  926. }
  927. ret = rxrpc_sysctl_init();
  928. if (ret < 0) {
  929. pr_crit("Cannot register sysctls\n");
  930. goto error_sysctls;
  931. }
  932. return 0;
  933. error_sysctls:
  934. unregister_key_type(&key_type_rxrpc_s);
  935. error_key_type_s:
  936. unregister_key_type(&key_type_rxrpc);
  937. error_key_type:
  938. sock_unregister(PF_RXRPC);
  939. error_sock:
  940. proto_unregister(&rxrpc_proto);
  941. error_proto:
  942. unregister_pernet_device(&rxrpc_net_ops);
  943. error_pernet:
  944. rxrpc_exit_security();
  945. error_security:
  946. destroy_workqueue(rxrpc_workqueue);
  947. error_work_queue:
  948. kmem_cache_destroy(rxrpc_call_jar);
  949. error_call_jar:
  950. return ret;
  951. }
/*
 * unregister the RxRPC protocol
 *
 * Tears down everything af_rxrpc_init() set up, in reverse order, then
 * checks that no sk_buffs are still outstanding.
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_device(&rxrpc_net_ops);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();

	/* The workqueue must only be destroyed after the RCU barrier above has
	 * let any deferred cleanup work finish queueing.
	 */
	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}
/* Module entry and exit points. */
module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);