/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <linux/win_minmax.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
#include "protocol.h"

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct key_preparsed_payload;
struct rxrpc_connection;
struct rxrpc_txbuf;

/*
 * Mark applied to socket buffers in skb->mark.  skb->priority is used
 * to pass supplementary information.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_PACKET,			/* Received packet */
	RXRPC_SKB_MARK_ERROR,			/* Error notification */
	RXRPC_SKB_MARK_SERVICE_CONN_SECURED,	/* Service connection response has been verified */
	RXRPC_SKB_MARK_REJECT_BUSY,		/* Reject with BUSY */
	RXRPC_SKB_MARK_REJECT_ABORT,		/* Reject with ABORT (code in skb->priority) */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_BOUND2,		/* second server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * Per-network namespace data.
 */
struct rxrpc_net {
	struct proc_dir_entry *proc_net; /* Subdir in /proc/net */
	u32 epoch;			/* Local epoch for detecting local-end reset */
	struct list_head calls;		/* List of calls active in this namespace */
	spinlock_t call_lock;		/* Lock for ->calls */
	atomic_t nr_calls;		/* Count of allocated calls */

	atomic_t nr_conns;
	struct list_head bundle_proc_list; /* List of bundles for proc */
	struct list_head conn_proc_list; /* List of conns in this namespace for proc */
	struct list_head service_conns; /* Service conns in this namespace */
	rwlock_t conn_lock;		/* Lock for ->conn_proc_list, ->service_conns */
	struct work_struct service_conn_reaper;
	struct timer_list service_conn_reap_timer;

	bool live;

	atomic_t nr_client_conns;

	struct hlist_head local_endpoints;
	struct mutex local_mutex;	/* Lock for ->local_endpoints */

	DECLARE_HASHTABLE(peer_hash, 10);
	spinlock_t peer_hash_lock;	/* Lock for ->peer_hash */
#define RXRPC_KEEPALIVE_TIME 20		/* NAT keepalive time in seconds */
	u8 peer_keepalive_cursor;
	time64_t peer_keepalive_base;
	struct list_head peer_keepalive[32];
	struct list_head peer_keepalive_new;
	struct timer_list peer_keepalive_timer;
	struct work_struct peer_keepalive_work;

	atomic_t stat_tx_data;
	atomic_t stat_tx_data_retrans;
	atomic_t stat_tx_data_send;
	atomic_t stat_tx_data_send_frag;
	atomic_t stat_tx_data_send_fail;
	atomic_t stat_tx_data_underflow;
	atomic_t stat_tx_data_cwnd_reset;
	atomic_t stat_rx_data;
	atomic_t stat_rx_data_reqack;
	atomic_t stat_rx_data_jumbo;

	atomic_t stat_tx_ack_fill;
	atomic_t stat_tx_ack_send;
	atomic_t stat_tx_ack_skip;
	atomic_t stat_tx_acks[256];
	atomic_t stat_rx_acks[256];

	atomic_t stat_why_req_ack[8];

	atomic_t stat_io_loop;
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short peer_backlog_head;
	unsigned short peer_backlog_tail;
	unsigned short conn_backlog_head;
	unsigned short conn_backlog_tail;
	unsigned short call_backlog_head;
	unsigned short call_backlog_tail;
#define RXRPC_BACKLOG_MAX 32
	struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
};
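
/*
 * Example (illustrative sketch only, not the real consumer in call_accept.c):
 * each head/tail pair above indexes a power-of-two ring of RXRPC_BACKLOG_MAX
 * entries, so taking one preallocated call out of the ring amounts to:
 *
 *	unsigned short tail = b->call_backlog_tail;
 *
 *	if (tail != b->call_backlog_head) {
 *		call = b->call_backlog[tail];
 *		b->call_backlog_tail = (tail + 1) & (RXRPC_BACKLOG_MAX - 1);
 *	}
 */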

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock sk;
	rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local *local;	/* local endpoint */
	struct rxrpc_backlog *backlog;	/* Preallocation for services */
	spinlock_t incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head sock_calls;	/* List of calls owned by this socket */
	struct list_head to_be_accepted; /* calls awaiting acceptance */
	struct list_head recvmsg_q;	/* Calls awaiting recvmsg's attention */
	spinlock_t recvmsg_lock;	/* Lock for recvmsg_q */
	struct key *key;		/* security for this socket */
	struct key *securities;		/* list of server security descriptors */
	struct rb_root calls;		/* User ID -> call mapping */
	unsigned long flags;
#define RXRPC_SOCK_CONNECTED 0		/* connect_srx is set */
	rwlock_t call_lock;		/* lock for calls */
	u32 min_sec_level;		/* minimum security level */
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
	bool exclusive;			/* Exclusive connection for a client socket */
	u16 second_service;		/* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16 from;		/* Service ID to upgrade (if not 0) */
		u16 to;			/* service ID to upgrade to */
	} service_upgrade;
	sa_family_t family;		/* Protocol family created with */
	struct sockaddr_rxrpc srx;	/* Primary Service/local addresses */
	struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32 epoch;		/* client boot timestamp */
	u32 cid;		/* connection and channel ID */
	u32 callNumber;		/* call ID (0 for connection-level packets) */
	u32 seq;		/* sequence number of pkt in call stream */
	u32 serial;		/* serial number of pkt sent to network */
	u8 type;		/* packet type */
	u8 flags;		/* packet flags */
	u8 userStatus;		/* app-layer defined status */
	u8 securityIndex;	/* security protocol ID */
	union {
		u16 _rsvd;	/* reserved */
		u16 cksum;	/* kerberos security checksum */
	};
	u16 serviceId;		/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	union {
		struct rxrpc_connection *conn;	/* Connection referred to (poke packet) */
		struct {
			u16 offset;		/* Offset of data */
			u16 len;		/* Length of data */
			u8 flags;
#define RXRPC_RX_VERIFIED	0x01
		};
		struct {
			rxrpc_seq_t first_ack;	/* First packet in acks table */
			rxrpc_seq_t prev_ack;	/* Highest seq seen */
			rxrpc_serial_t acked_serial; /* Packet in response to (or 0) */
			u8 reason;		/* Reason for ack */
			u8 nr_acks;		/* Number of acks+nacks */
			u8 nr_nacks;		/* Number of nacks */
		} ack;
	};
	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
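
/*
 * Example: the cast above is how the private area is normally reached; code
 * handling a freshly received packet typically begins with
 *
 *	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 *
 * and then consults sp->hdr, sp->offset, sp->len and so on.
 */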

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char *name;	/* name of this service */
	u8 security_index;	/* security type provided */
	u32 no_key_abort;	/* Abort code indicating no key */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* Parse the information from a server key */
	int (*preparse_server_key)(struct key_preparsed_payload *);

	/* Clean up the preparse buffer after parsing a server key */
	void (*free_preparse_server_key)(struct key_preparsed_payload *);

	/* Destroy the payload of a server key */
	void (*destroy_server_key)(struct key *);

	/* Describe a server key */
	void (*describe_server_key)(const struct key *, struct seq_file *);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *,
					struct rxrpc_key_token *);

	/* Work out how much data we can store in a packet, given an estimate
	 * of the amount of data remaining and allocate a data buffer.
	 */
	struct rxrpc_txbuf *(*alloc_txbuf)(struct rxrpc_call *call, size_t remaining, gfp_t gfp);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *);

	/* Free crypto request on a call */
	void (*free_call_crypto)(struct rxrpc_call *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};
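
/*
 * Example (sketch only): a new security type is plugged in by filling in one
 * of these tables.  Exactly which hooks are mandatory is an assumption here;
 * see insecure.c (rxrpc_no_security) and rxkad.c for the in-tree instances.
 *
 *	static const struct rxrpc_security example_security = {
 *		.name			= "example",
 *		.security_index		= RXRPC_SECURITY_NONE,	// placeholder index
 *		.init_connection_security = example_init_conn_security,
 *		.secure_packet		= example_secure_packet,
 *		.verify_packet		= example_verify_packet,
 *	};
 *
 * where the example_* functions are hypothetical implementations of the
 * corresponding hooks.
 */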

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head rcu;
	atomic_t active_users;		/* Number of users of the local endpoint */
	refcount_t ref;			/* Number of references to the structure */
	struct net *net;		/* The network namespace */
	struct rxrpc_net *rxnet;	/* Our bits in the network namespace */
	struct hlist_node link;
	struct socket *socket;		/* my UDP socket */
	struct task_struct *io_thread;
	struct completion io_thread_ready; /* Indication that the I/O thread started */
	struct page_frag_cache tx_alloc; /* Tx control packet allocation (I/O thread only) */
	struct rxrpc_sock *service;	/* Service(s) listening on this endpoint */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
	struct sk_buff_head rx_delay_queue; /* Delay injection queue */
#endif
	struct sk_buff_head rx_queue;	/* Received packets */
	struct list_head conn_attend_q;	/* Conns requiring immediate attention */
	struct list_head call_attend_q;	/* Calls requiring immediate attention */

	struct rb_root client_bundles;	/* Client connection bundles by socket params */
	spinlock_t client_bundles_lock;	/* Lock for client_bundles */
	bool kill_all_client_conns;
	struct list_head idle_client_conns;
	struct timer_list client_conn_reap_timer;
	unsigned long client_conn_flags;
#define RXRPC_CLIENT_CONN_REAP_TIMER	0 /* The client conn reap timer expired */

	spinlock_t lock;		/* access lock */
	rwlock_t services_lock;		/* lock for services list */
	int debug_id;			/* debug ID for printks */
	bool dead;
	bool service_closed;		/* Service socket closed */
	struct idr conn_ids;		/* List of connection IDs */
	struct list_head new_client_calls; /* Newly created client calls need connection */
	spinlock_t client_call_lock;	/* Lock for ->new_client_calls */
	struct sockaddr_rxrpc srx;	/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head rcu;		/* This must be first */
	refcount_t ref;
	unsigned long hash_key;
	struct hlist_node hash_link;
	struct rxrpc_local *local;
	struct hlist_head error_targets; /* targets for net error distribution */
	struct rb_root service_conns;	/* Service connections */
	struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
	time64_t last_tx_at;		/* Last time packet sent here */
	seqlock_t service_conn_lock;
	spinlock_t lock;		/* access lock */
	unsigned int if_mtu;		/* interface MTU for this peer */
	unsigned int mtu;		/* network MTU for this peer */
	unsigned int maxdata;		/* data size (MTU - hdrsize) */
	unsigned short hdrsize;		/* header size (IP + UDP + RxRPC) */
	int debug_id;			/* debug ID for printks */
	struct sockaddr_rxrpc srx;	/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	spinlock_t rtt_input_lock;	/* RTT lock for input routine */
	ktime_t rtt_last_req;		/* Time of last RTT request */
	unsigned int rtt_count;		/* Number of samples we've got */

	u32 srtt_us;			/* smoothed round trip time << 3 in usecs */
	u32 mdev_us;			/* medium deviation */
	u32 mdev_max_us;		/* maximal mdev for the last rtt period */
	u32 rttvar_us;			/* smoothed mdev_max */
	u32 rto_us;			/* Retransmission timeout in usec */
	u8 backoff;			/* Backoff timeout (as shift) */

	u8 cong_ssthresh;		/* Congestion slow-start threshold */
};

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32 epoch;	/* epoch of this connection */
			u32 cid;	/* connection ID */
		};
		u64 index_key;
	};
};

struct rxrpc_conn_parameters {
	struct rxrpc_local *local;	/* Representation of local endpoint */
	struct rxrpc_peer *peer;	/* Representation of remote endpoint */
	struct key *key;		/* Security details */
	bool exclusive;			/* T if conn is exclusive */
	bool upgrade;			/* T if service ID can be upgraded */
	u16 service_id;			/* Service ID for this connection */
	u32 security_level;		/* Security level selected */
};

/*
 * Call completion condition (state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
};

#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))
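
/*
 * Example: because the four final-ACK flags occupy consecutive bits, code can
 * ask "does any channel still owe a final ACK" in a single test, e.g.
 *
 *	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
 *		...deal with the outstanding final ACKs...
 */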

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
	RXRPC_CONN_EV_ABORT_CALLS,	/* Abort attached calls */
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT_UNSECURED,	/* Client connection needs security init */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_ABORTED,		/* Conn aborted */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC client connection bundle.
 */
struct rxrpc_bundle {
	struct rxrpc_local *local;	/* Representation of local endpoint */
	struct rxrpc_peer *peer;	/* Remote endpoint */
	struct key *key;		/* Security details */
	struct list_head proc_link;	/* Link in net->bundle_proc_list */
	const struct rxrpc_security *security; /* applied security module */
	refcount_t ref;
	atomic_t active;		/* Number of active users */
	unsigned int debug_id;
	u32 security_level;		/* Security level selected */
	u16 service_id;			/* Service ID for this connection */
	bool try_upgrade;		/* True if the bundle is attempting upgrade */
	bool exclusive;			/* T if conn is exclusive */
	bool upgrade;			/* T if service ID can be upgraded */
	unsigned short alloc_error;	/* Error from last conn allocation */
	struct rb_node local_node;	/* Node in local->client_conns */
	struct list_head waiting_calls;	/* Calls waiting for channels */
	unsigned long avail_chans;	/* Mask of available channels */
	unsigned int conn_ids[4];	/* Connection IDs. */
	struct rxrpc_connection *conns[4]; /* The connections in the bundle (max 4) */
};

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto proto;
	struct rxrpc_local *local;	/* Representation of local endpoint */
	struct rxrpc_peer *peer;	/* Remote endpoint */
	struct rxrpc_net *rxnet;	/* Network namespace to which call belongs */
	struct key *key;		/* Security details */
	struct list_head attend_link;	/* Link in local->conn_attend_q */

	refcount_t ref;
	atomic_t active;		/* Active count for service conns */
	struct rcu_head rcu;
	struct list_head cache_link;

	unsigned char act_chans;	/* Mask of active channels */
	struct rxrpc_channel {
		unsigned long final_ack_at;	/* Time at which to issue final ACK */
		struct rxrpc_call *call;	/* Active call */
		unsigned int call_debug_id;	/* call->debug_id */
		u32 call_id;			/* ID of current call */
		u32 call_counter;		/* Call ID counter */
		u32 last_call;			/* ID of last call */
		u8 last_type;			/* Type of last packet */
		union {
			u32 last_seq;
			u32 last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list timer;	/* Conn event timer */
	struct work_struct processor;	/* connection event processor */
	struct work_struct destructor;	/* In-process-context destroyer */
	struct rxrpc_bundle *bundle;	/* Client connection bundle */
	struct rb_node service_node;	/* Node in peer->service_conns */
	struct list_head proc_link;	/* link in procfs list */
	struct list_head link;		/* link in master connection list */
	struct sk_buff_head rx_queue;	/* received conn-level packets */

	struct page_frag_cache tx_data_alloc; /* Tx DATA packet allocation */
	struct mutex tx_data_alloc_lock;

	struct mutex security_lock;	/* Lock for security management */
	const struct rxrpc_security *security; /* applied security module */
	union {
		struct {
			struct crypto_sync_skcipher *cipher; /* encryption handle */
			struct rxrpc_crypt csum_iv; /* packet checksum base */
			u32 nonce;	/* response re-use preventer */
		} rxkad;
	};
	unsigned long flags;
	unsigned long events;
	unsigned long idle_timestamp;	/* Time at which last became idle */
	spinlock_t state_lock;		/* state-change lock */
	enum rxrpc_conn_proto_state state; /* current state of connection */
	enum rxrpc_call_completion completion; /* Completion condition */
	s32 abort_code;			/* Abort code of connection abort */
	int debug_id;			/* debug ID for printks */
	rxrpc_serial_t tx_serial;	/* Outgoing packet serial number counter */
	unsigned int hi_serial;		/* highest serial number received */
	u32 service_id;			/* Service ID, possibly upgraded */
	u32 security_level;		/* Security level selected */
	u8 security_ix;			/* security type */
	u8 out_clientflag;		/* RXRPC_CLIENT_INITIATED if we are client */
	u8 bundle_shift;		/* Index into bundle->avail_chans */
	bool exclusive;			/* T if conn is exclusive */
	bool upgrade;			/* T if service ID can be upgraded */
	u16 orig_service_id;		/* Originally requested service ID */
	short error;			/* Local error code */
};

static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
{
	return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
}

static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
{
	return !rxrpc_to_server(sp);
}

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_TX_ALL_ACKED,	/* Last packet has been hard-acked */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
	RXRPC_CALL_DISCONNECTED,	/* The call has been disconnected */
	RXRPC_CALL_KERNEL,		/* The call was made by the kernel */
	RXRPC_CALL_UPGRADE,		/* Service upgrade was requested for the call */
	RXRPC_CALL_EXCLUSIVE,		/* The call uses a once-only connection */
	RXRPC_CALL_RX_IS_IDLE,		/* recvmsg() is idle - send an ACK */
	RXRPC_CALL_RECVMSG_READ_ALL,	/* recvmsg() read all of the received data */
	RXRPC_CALL_CONN_CHALLENGING,	/* The connection is being challenged */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK_LOST,		/* ACK may be lost, send ping */
	RXRPC_CALL_EV_INITIAL_PING,	/* Send initial ping for a new service call */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head rcu;
	struct rxrpc_connection *conn;	/* connection carrying call */
	struct rxrpc_bundle *bundle;	/* Connection bundle to use */
	struct rxrpc_peer *peer;	/* Peer record for remote address */
	struct rxrpc_local *local;	/* Representation of local endpoint */
	struct rxrpc_sock __rcu *socket; /* socket responsible */
	struct rxrpc_net *rxnet;	/* Network namespace to which call belongs */
	struct key *key;		/* Security details */
	const struct rxrpc_security *security; /* applied security module */
	struct mutex user_mutex;	/* User access mutex */
	struct sockaddr_rxrpc dest_srx;	/* Destination address */
	ktime_t delay_ack_at;		/* When DELAY ACK needs to happen */
	ktime_t ack_lost_at;		/* When ACK is figured as lost */
	ktime_t resend_at;		/* When next resend needs to happen */
	ktime_t ping_at;		/* When next to send a ping */
	ktime_t keepalive_at;		/* When next to send a keepalive ping */
	ktime_t expect_rx_by;		/* When we expect to get a packet by */
	ktime_t expect_req_by;		/* When we expect to get a request DATA packet by */
	ktime_t expect_term_by;		/* When we expect call termination by */
	u32 next_rx_timo;		/* Timeout for next Rx packet (ms) */
	u32 next_req_timo;		/* Timeout for next Rx request packet (ms) */
	u32 hard_timo;			/* Maximum lifetime or 0 (s) */
	struct timer_list timer;	/* Combined event timer */
	struct work_struct destroyer;	/* In-process-context destroyer */
	rxrpc_notify_rx_t notify_rx;	/* kernel service Rx notification function */
	struct list_head link;		/* link in master call list */
	struct list_head wait_link;	/* Link in local->new_client_calls */
	struct hlist_node error_link;	/* link in error distribution list */
	struct list_head accept_link;	/* Link in rx->acceptq */
	struct list_head recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head sock_link;	/* Link in rx->sock_calls */
	struct rb_node sock_node;	/* Node in rx->calls */
	struct list_head attend_link;	/* Link in local->call_attend_q */
	struct rxrpc_txbuf *tx_pending;	/* Tx buffer being filled */
	wait_queue_head_t waitq;	/* Wait queue for channel or Tx */
	s64 tx_total_len;		/* Total length left to be transmitted (or -1) */
	unsigned long user_call_ID;	/* user-defined call ID */
	unsigned long flags;
	unsigned long events;
	spinlock_t notify_lock;		/* Kernel notification lock */
	unsigned int send_abort_why;	/* Why the abort [enum rxrpc_abort_reason] */
	s32 send_abort;			/* Abort code to be sent */
	short send_abort_err;		/* Error to be associated with the abort */
	rxrpc_seq_t send_abort_seq;	/* DATA packet that incurred the abort (or 0) */
	s32 abort_code;			/* Local/remote abort code */
	int error;			/* Local error incurred */
	enum rxrpc_call_state _state;	/* Current state of call (needs barrier) */
	enum rxrpc_call_completion completion; /* Call completion condition */
	refcount_t ref;
	u8 security_ix;			/* Security type */
	enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
	u32 call_id;			/* call ID on connection */
	u32 cid;			/* connection ID plus channel index */
	u32 security_level;		/* Security level selected */
	int debug_id;			/* debug ID for printks */
	unsigned short rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short rx_pkt_len;	/* Current recvmsg packet len */

	/* Transmitted data tracking. */
	spinlock_t tx_lock;		/* Transmit queue lock */
	struct list_head tx_sendmsg;	/* Sendmsg prepared packets */
	struct list_head tx_buffer;	/* Buffer of transmissible packets */
	rxrpc_seq_t tx_bottom;		/* First packet in buffer */
	rxrpc_seq_t tx_transmitted;	/* Highest packet transmitted */
	rxrpc_seq_t tx_prepared;	/* Highest Tx slot prepared. */
	rxrpc_seq_t tx_top;		/* Highest Tx slot allocated. */
	u16 tx_backoff;			/* Delay to insert due to Tx failure (ms) */
	u8 tx_winsize;			/* Maximum size of Tx window */
#define RXRPC_TX_MAX_WINDOW	128
	ktime_t tx_last_sent;		/* Last time a transmission occurred */

	/* Received data tracking */
	struct sk_buff_head recvmsg_queue; /* Queue of packets ready for recvmsg() */
	struct sk_buff_head rx_oos_queue; /* Queue of out of sequence packets */
	rxrpc_seq_t rx_highest_seq;	/* Highest sequence number received */
	rxrpc_seq_t rx_consumed;	/* Highest packet consumed */
	rxrpc_serial_t rx_serial;	/* Highest serial received for this call */
	u8 rx_winsize;			/* Size of Rx window */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.  (An illustrative sketch of the
	 * segment-based update follows this struct.)
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
#define RXRPC_MIN_CWND		4
	u8 cong_cwnd;			/* Congestion window size */
	u8 cong_extra;			/* Extra to send for congestion management */
	u8 cong_ssthresh;		/* Slow-start threshold */
	enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
	u8 cong_dup_acks;		/* Count of ACKs showing missing packets */
	u8 cong_cumul_acks;		/* Cumulative ACK count */
	ktime_t cong_tstamp;		/* Last time cwnd was changed */
	struct sk_buff *cong_last_nack;	/* Last ACK with nacks received */

	/* Receive-phase ACK management (ACKs we send). */
	u8 ackr_reason;			/* reason to ACK */
	u16 ackr_sack_base;		/* Starting slot in SACK table ring */
	rxrpc_seq_t ackr_window;	/* Base of SACK window */
	rxrpc_seq_t ackr_wtop;		/* Top of SACK window */
	unsigned int ackr_nr_unacked;	/* Number of unacked packets */
	atomic_t ackr_nr_consumed;	/* Number of packets needing hard ACK */
	struct {
#define RXRPC_SACK_SIZE 256
		/* SACK table for soft-acked packets */
		u8 ackr_sack_table[RXRPC_SACK_SIZE];
	} __aligned(8);

	/* RTT management */
	rxrpc_serial_t rtt_serial[4];	/* Serial number of DATA or PING sent */
	ktime_t rtt_sent_at[4];		/* Time packet sent */
	unsigned long rtt_avail;	/* Mask of available slots in bits 0-3,
					 * Mask of pending samples in 8-11 */
#define RXRPC_CALL_RTT_AVAIL_MASK	0xf
#define RXRPC_CALL_RTT_PEND_SHIFT	8

	/* Transmission-phase ACK management (ACKs we've received). */
	ktime_t acks_latest_ts;		/* Timestamp of latest ACK received */
	rxrpc_seq_t acks_first_seq;	/* first sequence number received */
	rxrpc_seq_t acks_prev_seq;	/* Highest previousPacket received */
	rxrpc_seq_t acks_hard_ack;	/* Latest hard-ack point */
	rxrpc_seq_t acks_lowest_nak;	/* Lowest NACK in the buffer (or ==tx_hard_ack) */
	rxrpc_serial_t acks_highest_serial; /* Highest serial number ACK'd */
};
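
/*
 * Illustrative sketch only (derived from RFC 5681, not lifted from the ACK
 * processing code): with a fixed SMSS the congestion bookkeeping above stays
 * in whole DATA segments, so slow start adds one segment of window per newly
 * acked segment, roughly
 *
 *	if (call->cong_mode == RXRPC_CALL_SLOW_START &&
 *	    call->cong_cwnd < call->cong_ssthresh)
 *		call->cong_cwnd++;
 *
 * while congestion avoidance grows cong_cwnd by about one segment per full
 * window of ACKs, and the window is assumed to be bounded by tx_winsize.
 */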

/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u16 nr_acks;			/* Number of ACKs in packet */
	u16 nr_new_acks;		/* Number of new ACKs in packet */
	u16 nr_new_nacks;		/* Number of new nacks in packet */
	u16 nr_retained_nacks;		/* Number of nacks retained between ACKs */
	u8 ack_reason;
	bool saw_nacks;			/* Saw NACKs in packet */
	bool new_low_nack;		/* T if new low NACK found */
	bool retrans_timeo;		/* T if reTx due to timeout happened */
	u8 flight_size;			/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode mode:8;
	u8 cwnd;
	u8 ssthresh;
	u8 dup_acks;
	u8 cumulative_acks;
};

/*
 * sendmsg() cmsg-specified parameters.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
	RXRPC_CMD_CHARGE_ACCEPT,	/* [server] charge accept preallocation */
};

struct rxrpc_call_params {
	s64 tx_total_len;		/* Total Tx data length (if send data) */
	unsigned long user_call_ID;	/* User's call ID */
	struct {
		u32 hard;		/* Maximum lifetime (sec) */
		u32 idle;		/* Max time since last data packet (msec) */
		u32 normal;		/* Max time since last call packet (msec) */
	} timeouts;
	u8 nr_timeouts;			/* Number of timeouts specified */
	bool kernel;			/* T if kernel is making the call */
	enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
};

struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32 abort_code;			/* Abort code to Tx (if abort) */
	enum rxrpc_command command : 8;	/* The command to implement */
	bool exclusive;			/* Shared or exclusive call */
	bool upgrade;			/* If the connection is upgradeable */
};

/*
 * Buffer of data to be output as a packet.
 */
struct rxrpc_txbuf {
	struct list_head call_link;	/* Link in call->tx_sendmsg/tx_buffer */
	struct list_head tx_link;	/* Link in live Enc queue or Tx queue */
	ktime_t last_sent;		/* Time at which last transmitted */
	refcount_t ref;
	rxrpc_seq_t seq;		/* Sequence number of this packet */
	rxrpc_serial_t serial;		/* Last serial number transmitted with */
	unsigned int call_debug_id;
	unsigned int debug_id;
	unsigned int len;		/* Amount of data in buffer */
	unsigned int space;		/* Remaining data space */
	unsigned int offset;		/* Offset of fill point */
	unsigned int flags;
#define RXRPC_TXBUF_WIRE_FLAGS	0xff	/* The wire protocol flags */
#define RXRPC_TXBUF_RESENT	0x100	/* Set if has been resent */
	__be16 cksum;			/* Checksum to go in header */
	unsigned short ack_rwind;	/* ACK receive window */
	u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */
	u8 nr_kvec;			/* Amount of kvec[] used */
	struct kvec kvec[3];
};

static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
{
	return txb->flags & RXRPC_CLIENT_INITIATED;
}

static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
{
	return !rxrpc_sending_to_server(txb);
}

#include <trace/events/rxrpc.h>

/*
 * Allocate the next serial number on a connection.  0 must be skipped.
 */
static inline rxrpc_serial_t rxrpc_get_next_serial(struct rxrpc_connection *conn)
{
	rxrpc_serial_t serial;

	serial = conn->tx_serial;
	if (serial == 0)
		serial = 1;
	conn->tx_serial = serial + 1;
	return serial;
}
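
/*
 * Example: if conn->tx_serial has wrapped around to 0, the helper above hands
 * out serial 1 and leaves the counter at 2, so a serial number of 0 (which
 * must be skipped) never reaches the wire.
 */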

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
			     struct rxrpc_peer *peer,
			     struct rxrpc_connection *conn,
			     struct sockaddr_rxrpc *peer_srx,
			     struct sk_buff *skb);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);

/*
 * call_event.c
 */
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
			enum rxrpc_propose_ack_trace why);
void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
			     enum rxrpc_propose_ack_trace);
void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb);
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern struct kmem_cache *rxrpc_call_jar;

void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what);
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct rxrpc_call_params *, gfp_t,
					 unsigned int);
void rxrpc_start_call_timer(struct rxrpc_call *call);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void rxrpc_see_call(struct rxrpc_call *, enum rxrpc_call_trace);
struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * call_state.c
 */
bool rxrpc_set_call_completion(struct rxrpc_call *call,
			       enum rxrpc_call_completion compl,
			       u32 abort_code,
			       int error);
bool rxrpc_call_completed(struct rxrpc_call *call);
bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
		      u32 abort_code, int error, enum rxrpc_abort_reason why);
void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
			int error);

static inline void rxrpc_set_call_state(struct rxrpc_call *call,
					enum rxrpc_call_state state)
{
	/* Order write of completion info before write of ->state. */
	smp_store_release(&call->_state, state);
	wake_up(&call->waitq);
}

static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call)
{
	return call->_state; /* Only inside I/O thread */
}

static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call)
{
	return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
}

static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call)
{
	/* Order read ->state before read of completion info. */
	return smp_load_acquire(&call->_state);
}

static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call)
{
	return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
}

static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call)
{
	return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED;
}
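
/*
 * Example (sketch): rxrpc_set_call_state() publishes with a store-release and
 * rxrpc_call_state() reads with a load-acquire, so a caller outside the I/O
 * thread that does
 *
 *	if (rxrpc_call_is_complete(call))
 *		ret = call->error;
 *
 * sees completion info (error, abort code) that is at least as new as the
 * RXRPC_CALL_COMPLETE state it just observed.
 */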

/*
 * conn_client.c
 */
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;

void rxrpc_purge_client_connections(struct rxrpc_local *local);
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp);
void rxrpc_connect_client_calls(struct rxrpc_local *local);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);

/*
 * conn_event.c
 */
void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb,
				unsigned int channel);
int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
		     s32 abort_code, int err, enum rxrpc_abort_reason why);
void rxrpc_process_connection(struct work_struct *);
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb);

static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn)
{
	/* Order reading the abort info after the state check. */
	return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED;
}

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;

void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why);
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t);
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *,
							   struct sockaddr_rxrpc *,
							   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_client_conn(struct rxrpc_connection *);
void rxrpc_queue_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_see_connection(struct rxrpc_connection *, enum rxrpc_conn_trace);
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *,
					      enum rxrpc_conn_trace);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *,
						    enum rxrpc_conn_trace);
void rxrpc_put_connection(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
				   const struct rxrpc_security *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_congestion_degrade(struct rxrpc_call *);
void rxrpc_input_call_packet(struct rxrpc_call *, struct sk_buff *);
void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);

/*
 * io_thread.c
 */
int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
void rxrpc_error_report(struct sock *);
bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
			s32 abort_code, int err);
int rxrpc_io_thread(void *data);

static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
{
	wake_up_process(READ_ONCE(local->io_thread));
}

static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
{
	return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO);
}

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;

int rxrpc_request_key(struct rxrpc_sock *, sockptr_t, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
void rxrpc_gen_version_string(void);
void rxrpc_send_version_request(struct rxrpc_local *local,
				struct rxrpc_host_header *hdr,
				struct sk_buff *skb);

/*
 * local_object.c
 */
void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set);
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *, enum rxrpc_local_trace);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_put_local(struct rxrpc_local *, enum rxrpc_local_trace);
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_unuse_local(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_destroy_local(struct rxrpc_local *local);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

static inline bool __rxrpc_use_local(struct rxrpc_local *local,
				     enum rxrpc_local_trace why)
{
	int r, u;

	r = refcount_read(&local->ref);
	u = atomic_fetch_add_unless(&local->active_users, 1, 0);
	trace_rxrpc_local(local->debug_id, why, r, u);
	return u != 0;
}

static inline void rxrpc_see_local(struct rxrpc_local *local,
				   enum rxrpc_local_trace why)
{
	int r, u;

	r = refcount_read(&local->ref);
	u = atomic_read(&local->active_users);
	trace_rxrpc_local(local->debug_id, why, r, u);
}

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
extern unsigned long rxrpc_inject_rx_delay;
#endif

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}

/*
 * output.c
 */
void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
		    rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why);
int rxrpc_send_abort_packet(struct rxrpc_call *);
void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);

/*
 * peer_event.c
 */
void rxrpc_input_error(struct rxrpc_local *, struct sk_buff *);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
				     struct sockaddr_rxrpc *srx, gfp_t gfp);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
				    enum rxrpc_peer_trace);
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);

/*
 * proc.c
 */
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
extern const struct seq_operations rxrpc_bundle_seq_ops;
extern const struct seq_operations rxrpc_peer_seq_ops;
extern const struct seq_operations rxrpc_local_seq_ops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * Abort a call due to a protocol error.
 */
static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
				     struct sk_buff *skb,
				     s32 abort_code,
				     enum rxrpc_abort_reason why)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why);
	return -EPROTO;
}

/*
 * rtt.c
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
const struct rxrpc_security *rxrpc_security_lookup(u8);
void rxrpc_exit_security(void);
int rxrpc_init_client_call_security(struct rxrpc_call *);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *,
							 struct sk_buff *);
struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
					  struct sk_buff *, u32, u32);

/*
 * sendmsg.c
 */
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
			 enum rxrpc_abort_reason why);
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * server_key.c
 */
extern struct key_type key_type_rxrpc_s;

int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * stats.c
 */
int rxrpc_stats_show(struct seq_file *seq, void *v);
int rxrpc_stats_clear(struct file *file, char *buf, size_t size);

#define rxrpc_inc_stat(rxnet, s) atomic_inc(&(rxnet)->s)
#define rxrpc_dec_stat(rxnet, s) atomic_dec(&(rxnet)->s)
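
/*
 * Example: the counters are plain atomics in struct rxrpc_net, so a
 * hypothetical call site accounting a received DATA packet would do
 *
 *	rxrpc_inc_stat(call->rxnet, stat_rx_data);
 */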

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * txbuf.c
 */
extern atomic_t rxrpc_nr_txbuf;
struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
					   size_t data_align, gfp_t gfp);
struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size);
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
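
/*
 * Example: the signed-difference trick makes these helpers wrap-safe; e.g.
 * before(0xfffffffe, 1) is true because (s32)(0xfffffffe - 1) is negative,
 * even though a plain unsigned comparison would say otherwise.
 */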

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk(" "FMT ,##__VA_ARGS__)
#endif
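
/*
 * Example (hypothetical call site): with CONFIG_AF_RXRPC_DEBUG enabled and the
 * relevant bits set in rxrpc_debug, a function can trace entry and exit with
 *
 *	_enter("{%d}", call->debug_id);
 *	...
 *	_leave(" = %d", ret);
 */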

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */