/* ar-internal.h */
/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "protocol.h"
  18. #if 0
  19. #define CHECK_SLAB_OKAY(X) \
  20. BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
  21. (POISON_FREE << 8 | POISON_FREE))
  22. #else
  23. #define CHECK_SLAB_OKAY(X) do {} while (0)
  24. #endif
  25. #define FCRYPT_BSIZE 8
  26. struct rxrpc_crypt {
  27. union {
  28. u8 x[FCRYPT_BSIZE];
  29. __be32 n[2];
  30. };
  31. } __attribute__((aligned(8)));
  32. #define rxrpc_queue_work(WS) queue_work(rxrpc_workqueue, (WS))
  33. #define rxrpc_queue_delayed_work(WS,D) \
  34. queue_delayed_work(rxrpc_workqueue, (WS), (D))
  35. struct rxrpc_connection;
  36. /*
  37. * Mark applied to socket buffers in skb->mark. skb->priority is used
  38. * to pass supplementary information.
  39. */
  40. enum rxrpc_skb_mark {
  41. RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
  42. RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
  43. };
  44. /*
  45. * sk_state for RxRPC sockets
  46. */
  47. enum {
  48. RXRPC_UNBOUND = 0,
  49. RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */
  50. RXRPC_CLIENT_BOUND, /* client local address bound */
  51. RXRPC_SERVER_BOUND, /* server local address bound */
  52. RXRPC_SERVER_BOUND2, /* second server local address bound */
  53. RXRPC_SERVER_LISTENING, /* server listening for connections */
  54. RXRPC_SERVER_LISTEN_DISABLED, /* server listening disabled */
  55. RXRPC_CLOSE, /* socket is being closed */
  56. };
  57. /*
  58. * Per-network namespace data.
  59. */
  60. struct rxrpc_net {
  61. struct proc_dir_entry *proc_net; /* Subdir in /proc/net */
  62. u32 epoch; /* Local epoch for detecting local-end reset */
  63. struct list_head calls; /* List of calls active in this namespace */
  64. rwlock_t call_lock; /* Lock for ->calls */
  65. atomic_t nr_calls; /* Count of allocated calls */
  66. atomic_t nr_conns;
  67. struct list_head conn_proc_list; /* List of conns in this namespace for proc */
  68. struct list_head service_conns; /* Service conns in this namespace */
  69. rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
  70. struct work_struct service_conn_reaper;
  71. struct timer_list service_conn_reap_timer;
  72. unsigned int nr_client_conns;
  73. unsigned int nr_active_client_conns;
  74. bool kill_all_client_conns;
  75. bool live;
  76. spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
  77. spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
  78. struct list_head waiting_client_conns;
  79. struct list_head active_client_conns;
  80. struct list_head idle_client_conns;
  81. struct work_struct client_conn_reaper;
  82. struct timer_list client_conn_reap_timer;
  83. struct list_head local_endpoints;
  84. struct mutex local_mutex; /* Lock for ->local_endpoints */
  85. DECLARE_HASHTABLE (peer_hash, 10);
  86. spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
  87. #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
  88. u8 peer_keepalive_cursor;
  89. time64_t peer_keepalive_base;
  90. struct list_head peer_keepalive[32];
  91. struct list_head peer_keepalive_new;
  92. struct timer_list peer_keepalive_timer;
  93. struct work_struct peer_keepalive_work;
  94. };
  95. /*
  96. * Service backlog preallocation.
  97. *
  98. * This contains circular buffers of preallocated peers, connections and calls
  99. * for incoming service calls and their head and tail pointers. This allows
  100. * calls to be set up in the data_ready handler, thereby avoiding the need to
  101. * shuffle packets around so much.
  102. */
  103. struct rxrpc_backlog {
  104. unsigned short peer_backlog_head;
  105. unsigned short peer_backlog_tail;
  106. unsigned short conn_backlog_head;
  107. unsigned short conn_backlog_tail;
  108. unsigned short call_backlog_head;
  109. unsigned short call_backlog_tail;
  110. #define RXRPC_BACKLOG_MAX 32
  111. struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
  112. struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
  113. struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
  114. };
  115. /*
  116. * RxRPC socket definition
  117. */
  118. struct rxrpc_sock {
  119. /* WARNING: sk has to be the first member */
  120. struct sock sk;
  121. rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
  122. rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
  123. struct rxrpc_local *local; /* local endpoint */
  124. struct rxrpc_backlog *backlog; /* Preallocation for services */
  125. spinlock_t incoming_lock; /* Incoming call vs service shutdown lock */
  126. struct list_head sock_calls; /* List of calls owned by this socket */
  127. struct list_head to_be_accepted; /* calls awaiting acceptance */
  128. struct list_head recvmsg_q; /* Calls awaiting recvmsg's attention */
  129. rwlock_t recvmsg_lock; /* Lock for recvmsg_q */
  130. struct key *key; /* security for this socket */
  131. struct key *securities; /* list of server security descriptors */
  132. struct rb_root calls; /* User ID -> call mapping */
  133. unsigned long flags;
  134. #define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
  135. rwlock_t call_lock; /* lock for calls */
  136. u32 min_sec_level; /* minimum security level */
  137. #define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
  138. bool exclusive; /* Exclusive connection for a client socket */
  139. u16 second_service; /* Additional service bound to the endpoint */
  140. struct {
  141. /* Service upgrade information */
  142. u16 from; /* Service ID to upgrade (if not 0) */
  143. u16 to; /* service ID to upgrade to */
  144. } service_upgrade;
  145. sa_family_t family; /* Protocol family created with */
  146. struct sockaddr_rxrpc srx; /* Primary Service/local addresses */
  147. struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
  148. };
  149. #define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
  150. /*
  151. * CPU-byteorder normalised Rx packet header.
  152. */
  153. struct rxrpc_host_header {
  154. u32 epoch; /* client boot timestamp */
  155. u32 cid; /* connection and channel ID */
  156. u32 callNumber; /* call ID (0 for connection-level packets) */
  157. u32 seq; /* sequence number of pkt in call stream */
  158. u32 serial; /* serial number of pkt sent to network */
  159. u8 type; /* packet type */
  160. u8 flags; /* packet flags */
  161. u8 userStatus; /* app-layer defined status */
  162. u8 securityIndex; /* security protocol ID */
  163. union {
  164. u16 _rsvd; /* reserved */
  165. u16 cksum; /* kerberos security checksum */
  166. };
  167. u16 serviceId; /* service ID */
  168. } __packed;
  169. /*
  170. * RxRPC socket buffer private variables
  171. * - max 48 bytes (struct sk_buff::cb)
  172. */
  173. struct rxrpc_skb_priv {
  174. union {
  175. u8 nr_jumbo; /* Number of jumbo subpackets */
  176. };
  177. union {
  178. int remain; /* amount of space remaining for next write */
  179. };
  180. struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
  181. };
  182. #define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
  183. /*
  184. * RxRPC security module interface
  185. */
  186. struct rxrpc_security {
  187. const char *name; /* name of this service */
  188. u8 security_index; /* security type provided */
  189. /* Initialise a security service */
  190. int (*init)(void);
  191. /* Clean up a security service */
  192. void (*exit)(void);
  193. /* initialise a connection's security */
  194. int (*init_connection_security)(struct rxrpc_connection *);
  195. /* prime a connection's packet security */
  196. int (*prime_packet_security)(struct rxrpc_connection *);
  197. /* impose security on a packet */
  198. int (*secure_packet)(struct rxrpc_call *,
  199. struct sk_buff *,
  200. size_t,
  201. void *);
  202. /* verify the security on a received packet */
  203. int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
  204. unsigned int, unsigned int, rxrpc_seq_t, u16);
  205. /* Locate the data in a received packet that has been verified. */
  206. void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
  207. unsigned int *, unsigned int *);
  208. /* issue a challenge */
  209. int (*issue_challenge)(struct rxrpc_connection *);
  210. /* respond to a challenge */
  211. int (*respond_to_challenge)(struct rxrpc_connection *,
  212. struct sk_buff *,
  213. u32 *);
  214. /* verify a response */
  215. int (*verify_response)(struct rxrpc_connection *,
  216. struct sk_buff *,
  217. u32 *);
  218. /* clear connection security */
  219. void (*clear)(struct rxrpc_connection *);
  220. };
  221. /*
  222. * RxRPC local transport endpoint description
  223. * - owned by a single AF_RXRPC socket
  224. * - pointed to by transport socket struct sk_user_data
  225. */
  226. struct rxrpc_local {
  227. struct rcu_head rcu;
  228. atomic_t active_users; /* Number of users of the local endpoint */
  229. atomic_t usage; /* Number of references to the structure */
  230. struct rxrpc_net *rxnet; /* The network ns in which this resides */
  231. struct list_head link;
  232. struct socket *socket; /* my UDP socket */
  233. struct work_struct processor;
  234. struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
  235. struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
  236. struct sk_buff_head reject_queue; /* packets awaiting rejection */
  237. struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
  238. struct rb_root client_conns; /* Client connections by socket params */
  239. spinlock_t client_conns_lock; /* Lock for client_conns */
  240. spinlock_t lock; /* access lock */
  241. rwlock_t services_lock; /* lock for services list */
  242. int debug_id; /* debug ID for printks */
  243. bool dead;
  244. bool service_closed; /* Service socket closed */
  245. struct sockaddr_rxrpc srx; /* local address */
  246. };
  247. /*
  248. * RxRPC remote transport endpoint definition
  249. * - matched by local endpoint, remote port, address and protocol type
  250. */
  251. struct rxrpc_peer {
  252. struct rcu_head rcu; /* This must be first */
  253. atomic_t usage;
  254. unsigned long hash_key;
  255. struct hlist_node hash_link;
  256. struct rxrpc_local *local;
  257. struct hlist_head error_targets; /* targets for net error distribution */
  258. struct rb_root service_conns; /* Service connections */
  259. struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
  260. time64_t last_tx_at; /* Last time packet sent here */
  261. seqlock_t service_conn_lock;
  262. spinlock_t lock; /* access lock */
  263. unsigned int if_mtu; /* interface MTU for this peer */
  264. unsigned int mtu; /* network MTU for this peer */
  265. unsigned int maxdata; /* data size (MTU - hdrsize) */
  266. unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
  267. int debug_id; /* debug ID for printks */
  268. struct sockaddr_rxrpc srx; /* remote address */
  269. /* calculated RTT cache */
  270. #define RXRPC_RTT_CACHE_SIZE 32
  271. spinlock_t rtt_input_lock; /* RTT lock for input routine */
  272. ktime_t rtt_last_req; /* Time of last RTT request */
  273. u64 rtt; /* Current RTT estimate (in nS) */
  274. u64 rtt_sum; /* Sum of cache contents */
  275. u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
  276. u8 rtt_cursor; /* next entry at which to insert */
  277. u8 rtt_usage; /* amount of cache actually used */
  278. u8 cong_cwnd; /* Congestion window size */
  279. };
  280. /*
  281. * Keys for matching a connection.
  282. */
  283. struct rxrpc_conn_proto {
  284. union {
  285. struct {
  286. u32 epoch; /* epoch of this connection */
  287. u32 cid; /* connection ID */
  288. };
  289. u64 index_key;
  290. };
  291. };
  292. struct rxrpc_conn_parameters {
  293. struct rxrpc_local *local; /* Representation of local endpoint */
  294. struct rxrpc_peer *peer; /* Remote endpoint */
  295. struct key *key; /* Security details */
  296. bool exclusive; /* T if conn is exclusive */
  297. bool upgrade; /* T if service ID can be upgraded */
  298. u16 service_id; /* Service ID for this connection */
  299. u32 security_level; /* Security level selected */
  300. };
  301. /*
  302. * Bits in the connection flags.
  303. */
  304. enum rxrpc_conn_flag {
  305. RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
  306. RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
  307. RXRPC_CONN_IN_CLIENT_CONNS, /* Conn is in local->client_conns */
  308. RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */
  309. RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
  310. RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
  311. RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
  312. RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */
  313. RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */
  314. RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */
  315. RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */
  316. };
  317. #define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \
  318. (1UL << RXRPC_CONN_FINAL_ACK_1) | \
  319. (1UL << RXRPC_CONN_FINAL_ACK_2) | \
  320. (1UL << RXRPC_CONN_FINAL_ACK_3))
  321. /*
  322. * Events that can be raised upon a connection.
  323. */
  324. enum rxrpc_conn_event {
  325. RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */
  326. };
  327. /*
  328. * The connection cache state.
  329. */
  330. enum rxrpc_conn_cache_state {
  331. RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */
  332. RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */
  333. RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
  334. RXRPC_CONN_CLIENT_UPGRADE, /* Conn is on active list, probing for upgrade */
  335. RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
  336. RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
  337. RXRPC_CONN__NR_CACHE_STATES
  338. };
  339. /*
  340. * The connection protocol state.
  341. */
  342. enum rxrpc_conn_proto_state {
  343. RXRPC_CONN_UNUSED, /* Connection not yet attempted */
  344. RXRPC_CONN_CLIENT, /* Client connection */
  345. RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */
  346. RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
  347. RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
  348. RXRPC_CONN_SERVICE, /* Service secured connection */
  349. RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */
  350. RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */
  351. RXRPC_CONN__NR_STATES
  352. };
  353. /*
  354. * RxRPC connection definition
  355. * - matched by { local, peer, epoch, conn_id, direction }
  356. * - each connection can only handle four simultaneous calls
  357. */
  358. struct rxrpc_connection {
  359. struct rxrpc_conn_proto proto;
  360. struct rxrpc_conn_parameters params;
  361. atomic_t usage;
  362. struct rcu_head rcu;
  363. struct list_head cache_link;
  364. spinlock_t channel_lock;
  365. unsigned char active_chans; /* Mask of active channels */
  366. #define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
  367. struct list_head waiting_calls; /* Calls waiting for channels */
  368. struct rxrpc_channel {
  369. unsigned long final_ack_at; /* Time at which to issue final ACK */
  370. struct rxrpc_call __rcu *call; /* Active call */
  371. unsigned int call_debug_id; /* call->debug_id */
  372. u32 call_id; /* ID of current call */
  373. u32 call_counter; /* Call ID counter */
  374. u32 last_call; /* ID of last call */
  375. u8 last_type; /* Type of last packet */
  376. union {
  377. u32 last_seq;
  378. u32 last_abort;
  379. };
  380. } channels[RXRPC_MAXCALLS];
  381. struct timer_list timer; /* Conn event timer */
  382. struct work_struct processor; /* connection event processor */
  383. union {
  384. struct rb_node client_node; /* Node in local->client_conns */
  385. struct rb_node service_node; /* Node in peer->service_conns */
  386. };
  387. struct list_head proc_link; /* link in procfs list */
  388. struct list_head link; /* link in master connection list */
  389. struct sk_buff_head rx_queue; /* received conn-level packets */
  390. const struct rxrpc_security *security; /* applied security module */
  391. struct key *server_key; /* security for this service */
  392. struct crypto_skcipher *cipher; /* encryption handle */
  393. struct rxrpc_crypt csum_iv; /* packet checksum base */
  394. unsigned long flags;
  395. unsigned long events;
  396. unsigned long idle_timestamp; /* Time at which last became idle */
  397. spinlock_t state_lock; /* state-change lock */
  398. enum rxrpc_conn_cache_state cache_state;
  399. enum rxrpc_conn_proto_state state; /* current state of connection */
  400. u32 abort_code; /* Abort code of connection abort */
  401. int debug_id; /* debug ID for printks */
  402. atomic_t serial; /* packet serial number counter */
  403. unsigned int hi_serial; /* highest serial number received */
  404. u32 security_nonce; /* response re-use preventer */
  405. u32 service_id; /* Service ID, possibly upgraded */
  406. u8 size_align; /* data size alignment (for security) */
  407. u8 security_size; /* security header size */
  408. u8 security_ix; /* security type */
  409. u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
  410. short error; /* Local error code */
  411. };
  412. static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
  413. {
  414. return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
  415. }
  416. static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
  417. {
  418. return !rxrpc_to_server(sp);
  419. }
  420. /*
  421. * Flags in call->flags.
  422. */
  423. enum rxrpc_call_flag {
  424. RXRPC_CALL_RELEASED, /* call has been released - no more message to userspace */
  425. RXRPC_CALL_HAS_USERID, /* has a user ID attached */
  426. RXRPC_CALL_IS_SERVICE, /* Call is service call */
  427. RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
  428. RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
  429. RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
  430. RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
  431. RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
  432. RXRPC_CALL_PINGING, /* Ping in process */
  433. RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
  434. RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
  435. RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
  436. RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
  437. RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
  438. };
  439. /*
  440. * Events that can be raised on a call.
  441. */
  442. enum rxrpc_call_event {
  443. RXRPC_CALL_EV_ACK, /* need to generate ACK */
  444. RXRPC_CALL_EV_ABORT, /* need to generate abort */
  445. RXRPC_CALL_EV_RESEND, /* Tx resend required */
  446. RXRPC_CALL_EV_PING, /* Ping send required */
  447. RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
  448. RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */
  449. };
  450. /*
  451. * The states that a call can be in.
  452. */
  453. enum rxrpc_call_state {
  454. RXRPC_CALL_UNINITIALISED,
  455. RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */
  456. RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
  457. RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
  458. RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
  459. RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
  460. RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
  461. RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
  462. RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
  463. RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
  464. RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
  465. RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
  466. RXRPC_CALL_COMPLETE, /* - call complete */
  467. NR__RXRPC_CALL_STATES
  468. };
  469. /*
  470. * Call Tx congestion management modes.
  471. */
  472. enum rxrpc_congest_mode {
  473. RXRPC_CALL_SLOW_START,
  474. RXRPC_CALL_CONGEST_AVOIDANCE,
  475. RXRPC_CALL_PACKET_LOSS,
  476. RXRPC_CALL_FAST_RETRANSMIT,
  477. NR__RXRPC_CONGEST_MODES
  478. };
  479. /*
  480. * RxRPC call definition
  481. * - matched by { connection, call_id }
  482. */
  483. struct rxrpc_call {
  484. struct rcu_head rcu;
  485. struct rxrpc_connection *conn; /* connection carrying call */
  486. struct rxrpc_peer *peer; /* Peer record for remote address */
  487. struct rxrpc_sock __rcu *socket; /* socket responsible */
  488. struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
  489. struct mutex user_mutex; /* User access mutex */
  490. unsigned long ack_at; /* When deferred ACK needs to happen */
  491. unsigned long ack_lost_at; /* When ACK is figured as lost */
  492. unsigned long resend_at; /* When next resend needs to happen */
  493. unsigned long ping_at; /* When next to send a ping */
  494. unsigned long keepalive_at; /* When next to send a keepalive ping */
  495. unsigned long expect_rx_by; /* When we expect to get a packet by */
  496. unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
  497. unsigned long expect_term_by; /* When we expect call termination by */
  498. u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
  499. u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
  500. struct timer_list timer; /* Combined event timer */
  501. struct work_struct processor; /* Event processor */
  502. rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
  503. struct list_head link; /* link in master call list */
  504. struct list_head chan_wait_link; /* Link in conn->waiting_calls */
  505. struct hlist_node error_link; /* link in error distribution list */
  506. struct list_head accept_link; /* Link in rx->acceptq */
  507. struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
  508. struct list_head sock_link; /* Link in rx->sock_calls */
  509. struct rb_node sock_node; /* Node in rx->calls */
  510. struct sk_buff *tx_pending; /* Tx socket buffer being filled */
  511. wait_queue_head_t waitq; /* Wait queue for channel or Tx */
  512. s64 tx_total_len; /* Total length left to be transmitted (or -1) */
  513. __be32 crypto_buf[2]; /* Temporary packet crypto buffer */
  514. unsigned long user_call_ID; /* user-defined call ID */
  515. unsigned long flags;
  516. unsigned long events;
  517. spinlock_t lock;
  518. spinlock_t notify_lock; /* Kernel notification lock */
  519. rwlock_t state_lock; /* lock for state transition */
  520. u32 abort_code; /* Local/remote abort code */
  521. int error; /* Local error incurred */
  522. enum rxrpc_call_state state; /* current state of call */
  523. enum rxrpc_call_completion completion; /* Call completion condition */
  524. atomic_t usage;
  525. u16 service_id; /* service ID */
  526. u8 security_ix; /* Security type */
  527. u32 call_id; /* call ID on connection */
  528. u32 cid; /* connection ID plus channel index */
  529. int debug_id; /* debug ID for printks */
  530. unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
  531. unsigned short rx_pkt_len; /* Current recvmsg packet len */
  532. /* Rx/Tx circular buffer, depending on phase.
  533. *
  534. * In the Rx phase, packets are annotated with 0 or the number of the
  535. * segment of a jumbo packet each buffer refers to. There can be up to
  536. * 47 segments in a maximum-size UDP packet.
  537. *
  538. * In the Tx phase, packets are annotated with which buffers have been
  539. * acked.
  540. */
  541. #define RXRPC_RXTX_BUFF_SIZE 64
  542. #define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
  543. #define RXRPC_INIT_RX_WINDOW_SIZE 63
  544. struct sk_buff **rxtx_buffer;
  545. u8 *rxtx_annotations;
  546. #define RXRPC_TX_ANNO_ACK 0
  547. #define RXRPC_TX_ANNO_UNACK 1
  548. #define RXRPC_TX_ANNO_NAK 2
  549. #define RXRPC_TX_ANNO_RETRANS 3
  550. #define RXRPC_TX_ANNO_MASK 0x03
  551. #define RXRPC_TX_ANNO_LAST 0x04
  552. #define RXRPC_TX_ANNO_RESENT 0x08
  553. #define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */
  554. #define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
  555. #define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
  556. rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
  557. * not hard-ACK'd packet follows this.
  558. */
  559. rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
  560. u16 tx_backoff; /* Delay to insert due to Tx failure */
  561. /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
  562. * is fixed, we keep these numbers in terms of segments (ie. DATA
  563. * packets) rather than bytes.
  564. */
  565. #define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN
  566. u8 cong_cwnd; /* Congestion window size */
  567. u8 cong_extra; /* Extra to send for congestion management */
  568. u8 cong_ssthresh; /* Slow-start threshold */
  569. enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
  570. u8 cong_dup_acks; /* Count of ACKs showing missing packets */
  571. u8 cong_cumul_acks; /* Cumulative ACK count */
  572. ktime_t cong_tstamp; /* Last time cwnd was changed */
  573. rxrpc_seq_t rx_hard_ack; /* Dead slot in buffer; the first received but not
  574. * consumed packet follows this.
  575. */
  576. rxrpc_seq_t rx_top; /* Highest Rx slot allocated. */
  577. rxrpc_seq_t rx_expect_next; /* Expected next packet sequence number */
  578. rxrpc_serial_t rx_serial; /* Highest serial received for this call */
  579. u8 rx_winsize; /* Size of Rx window */
  580. u8 tx_winsize; /* Maximum size of Tx window */
  581. bool tx_phase; /* T if transmission phase, F if receive phase */
  582. u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
  583. spinlock_t input_lock; /* Lock for packet input to this call */
  584. /* receive-phase ACK management */
  585. u8 ackr_reason; /* reason to ACK */
  586. u16 ackr_skew; /* skew on packet being ACK'd */
  587. rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
  588. rxrpc_serial_t ackr_first_seq; /* first sequence number received */
  589. rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
  590. rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
  591. rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
  592. /* ping management */
  593. rxrpc_serial_t ping_serial; /* Last ping sent */
  594. ktime_t ping_time; /* Time last ping sent */
  595. /* transmission-phase ACK management */
  596. ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
  597. rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
  598. rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
  599. rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
  600. rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
  601. };
/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 * Filled in while an ACK is parsed and applied; the trailing fields snapshot
 * the call's congestion state for tracing.
 */
struct rxrpc_ack_summary {
	u8			ack_reason;		/* Reason code carried by the ACK packet */
	u8			nr_acks;		/* Number of ACKs in packet */
	u8			nr_nacks;		/* Number of NACKs in packet */
	u8			nr_new_acks;		/* Number of new ACKs in packet */
	u8			nr_new_nacks;		/* Number of new NACKs in packet */
	u8			nr_rot_new_acks;	/* Number of rotated new ACKs */
	bool			new_low_nack;		/* T if new low NACK found */
	bool			retrans_timeo;		/* T if reTx due to timeout happened */
	u8			flight_size;		/* Number of unreceived transmissions */
	/* Place to stash values for tracing (mirror the call's cong_* fields) */
	enum rxrpc_congest_mode mode:8;			/* Congestion management mode */
	u8			cwnd;			/* Congestion window size */
	u8			ssthresh;		/* Slow-start threshold */
	u8			dup_acks;		/* Count of ACKs showing missing packets */
	u8			cumulative_acks;	/* Cumulative ACK count */
};
/*
 * sendmsg() cmsg-specified parameters.
 *
 * The operation to perform, as selected by the control messages passed to
 * sendmsg() (see rxrpc_do_sendmsg()).
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};
/*
 * Parameters for a new call, as conveyed through sendmsg() control messages.
 */
struct rxrpc_call_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	struct {
		u32		hard;		/* Maximum lifetime (sec) */
		u32		idle;		/* Max time since last data packet (msec) */
		u32		normal;		/* Max time since last call packet (msec) */
	} timeouts;
	u8			nr_timeouts;	/* Number of timeouts specified */
};
/*
 * Aggregate of all the sendmsg() cmsg-specified parameters: the new-call
 * parameters plus per-message options.
 */
struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32			abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};
  648. #include <trace/events/rxrpc.h>
  649. /*
  650. * af_rxrpc.c
  651. */
  652. extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
  653. extern struct workqueue_struct *rxrpc_workqueue;
  654. /*
  655. * call_accept.c
  656. */
  657. int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
  658. void rxrpc_discard_prealloc(struct rxrpc_sock *);
  659. struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
  660. struct rxrpc_sock *,
  661. struct sk_buff *);
  662. void rxrpc_accept_incoming_calls(struct rxrpc_local *);
  663. struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
  664. rxrpc_notify_rx_t);
  665. int rxrpc_reject_call(struct rxrpc_sock *);
  666. /*
  667. * call_event.c
  668. */
  669. void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
  670. enum rxrpc_propose_ack_trace);
  671. void rxrpc_process_call(struct work_struct *);
/*
 * Bring the call's timer expiry forward to @expire_at if that is earlier
 * than the currently scheduled expiry (timer_reduce() only ever moves a
 * timer earlier), tracing the adjustment with reason @why at time @now.
 */
static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
					   unsigned long expire_at,
					   unsigned long now,
					   enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}
  680. /*
  681. * call_object.c
  682. */
  683. extern const char *const rxrpc_call_states[];
  684. extern const char *const rxrpc_call_completions[];
  685. extern unsigned int rxrpc_max_call_lifetime;
  686. extern struct kmem_cache *rxrpc_call_jar;
  687. struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
  688. struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
  689. struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
  690. struct rxrpc_conn_parameters *,
  691. struct sockaddr_rxrpc *,
  692. struct rxrpc_call_params *, gfp_t,
  693. unsigned int);
  694. int rxrpc_retry_client_call(struct rxrpc_sock *,
  695. struct rxrpc_call *,
  696. struct rxrpc_conn_parameters *,
  697. struct sockaddr_rxrpc *,
  698. gfp_t);
  699. void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
  700. struct sk_buff *);
  701. void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
  702. int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
  703. void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
  704. bool __rxrpc_queue_call(struct rxrpc_call *);
  705. bool rxrpc_queue_call(struct rxrpc_call *);
  706. void rxrpc_see_call(struct rxrpc_call *);
  707. void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
  708. void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
  709. void rxrpc_cleanup_call(struct rxrpc_call *);
  710. void rxrpc_destroy_all_calls(struct rxrpc_net *);
/*
 * Return true if this call was received as a service (incoming) call, as
 * indicated by the RXRPC_CALL_IS_SERVICE flag.
 */
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}
/*
 * Return true if this call was initiated locally as a client call (i.e. it
 * is not a service call).
 */
static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}
  719. /*
  720. * Transition a call to the complete state.
  721. */
  722. static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
  723. enum rxrpc_call_completion compl,
  724. u32 abort_code,
  725. int error)
  726. {
  727. if (call->state < RXRPC_CALL_COMPLETE) {
  728. call->abort_code = abort_code;
  729. call->error = error;
  730. call->completion = compl,
  731. call->state = RXRPC_CALL_COMPLETE;
  732. trace_rxrpc_call_complete(call);
  733. wake_up(&call->waitq);
  734. return true;
  735. }
  736. return false;
  737. }
  738. static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
  739. enum rxrpc_call_completion compl,
  740. u32 abort_code,
  741. int error)
  742. {
  743. bool ret;
  744. write_lock_bh(&call->state_lock);
  745. ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
  746. write_unlock_bh(&call->state_lock);
  747. return ret;
  748. }
/*
 * Record that a call successfully completed.
 *
 * Callers take call->state_lock around this (see rxrpc_call_completed()).
 * Returns true if the call transitioned to the complete state here.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}
  756. static inline bool rxrpc_call_completed(struct rxrpc_call *call)
  757. {
  758. bool ret;
  759. write_lock_bh(&call->state_lock);
  760. ret = __rxrpc_call_completed(call);
  761. write_unlock_bh(&call->state_lock);
  762. return ret;
  763. }
/*
 * Record that a call is locally aborted.
 *
 * Emits an abort trace event (identifying the call and the packet sequence
 * @seq being processed when the abort was decided), then completes the call
 * with RXRPC_CALL_LOCALLY_ABORTED.  Callers take call->state_lock around
 * this (see rxrpc_abort_call()).
 *
 * Returns true if the call transitioned to the complete state here.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}
  776. static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
  777. rxrpc_seq_t seq, u32 abort_code, int error)
  778. {
  779. bool ret;
  780. write_lock_bh(&call->state_lock);
  781. ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
  782. write_unlock_bh(&call->state_lock);
  783. return ret;
  784. }
/*
 * Abort a call due to a protocol error.
 *
 * Emits an rx_eproto trace event carrying the offending packet's serial
 * number, then aborts the call with the given abort code and -EPROTO as the
 * local error.  Returns true if this caused the call to complete.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

/*
 * Wrapper that interns the eproto reason string for the tracing
 * infrastructure via tracepoint_string().
 */
#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
  801. /*
  802. * conn_client.c
  803. */
  804. extern unsigned int rxrpc_max_client_connections;
  805. extern unsigned int rxrpc_reap_client_connections;
  806. extern unsigned long rxrpc_conn_idle_client_expiry;
  807. extern unsigned long rxrpc_conn_idle_client_fast_expiry;
  808. extern struct idr rxrpc_client_conn_ids;
  809. void rxrpc_destroy_client_conn_ids(void);
  810. int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
  811. struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
  812. gfp_t);
  813. void rxrpc_expose_client_call(struct rxrpc_call *);
  814. void rxrpc_disconnect_client_call(struct rxrpc_call *);
  815. void rxrpc_put_client_conn(struct rxrpc_connection *);
  816. void rxrpc_discard_expired_client_conns(struct work_struct *);
  817. void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
  818. void rxrpc_clean_up_local_conns(struct rxrpc_local *);
  819. /*
  820. * conn_event.c
  821. */
  822. void rxrpc_process_connection(struct work_struct *);
  823. /*
  824. * conn_object.c
  825. */
  826. extern unsigned int rxrpc_connection_expiry;
  827. extern unsigned int rxrpc_closed_conn_expiry;
  828. struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
  829. struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
  830. struct sk_buff *,
  831. struct rxrpc_peer **);
  832. void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
  833. void rxrpc_disconnect_call(struct rxrpc_call *);
  834. void rxrpc_kill_connection(struct rxrpc_connection *);
  835. bool rxrpc_queue_conn(struct rxrpc_connection *);
  836. void rxrpc_see_connection(struct rxrpc_connection *);
  837. void rxrpc_get_connection(struct rxrpc_connection *);
  838. struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
  839. void rxrpc_put_service_conn(struct rxrpc_connection *);
  840. void rxrpc_service_connection_reaper(struct work_struct *);
  841. void rxrpc_destroy_all_connections(struct rxrpc_net *);
/*
 * Return true if this is a client-initiated connection, i.e. if
 * conn->out_clientflag is non-zero (implicitly converted to bool).
 */
static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}
/*
 * Return true if this is a service connection (i.e. not client-initiated).
 */
static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}
  850. static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
  851. {
  852. if (!conn)
  853. return;
  854. if (rxrpc_conn_is_client(conn))
  855. rxrpc_put_client_conn(conn);
  856. else
  857. rxrpc_put_service_conn(conn);
  858. }
/*
 * Bring the connection's timer expiry forward to @expire_at if that is
 * earlier than the currently scheduled expiry (timer_reduce() only ever
 * moves a timer earlier).
 */
static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}
  864. /*
  865. * conn_service.c
  866. */
  867. struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
  868. struct sk_buff *);
  869. struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
  870. void rxrpc_new_incoming_connection(struct rxrpc_sock *,
  871. struct rxrpc_connection *, struct sk_buff *);
  872. void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
  873. /*
  874. * input.c
  875. */
  876. int rxrpc_input_packet(struct sock *, struct sk_buff *);
  877. /*
  878. * insecure.c
  879. */
  880. extern const struct rxrpc_security rxrpc_no_security;
  881. /*
  882. * key.c
  883. */
  884. extern struct key_type key_type_rxrpc;
  885. extern struct key_type key_type_rxrpc_s;
  886. int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
  887. int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
  888. int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
  889. u32);
  890. /*
  891. * local_event.c
  892. */
  893. extern void rxrpc_process_local_events(struct rxrpc_local *);
  894. /*
  895. * local_object.c
  896. */
  897. struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
  898. struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
  899. struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
  900. void rxrpc_put_local(struct rxrpc_local *);
  901. struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
  902. void rxrpc_unuse_local(struct rxrpc_local *);
  903. void rxrpc_queue_local(struct rxrpc_local *);
  904. void rxrpc_destroy_all_locals(struct rxrpc_net *);
/*
 * Drop an active-user reference on a local endpoint.  Returns true if that
 * was the last active user (the count reached zero).
 */
static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
{
	return atomic_dec_return(&local->active_users) == 0;
}
/*
 * Try to take an active-user reference on a local endpoint.  The increment
 * is refused if the count has already dropped to zero (the endpoint is on
 * its way out); returns true if the reference was obtained.
 */
static inline bool __rxrpc_use_local(struct rxrpc_local *local)
{
	return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
}
  913. /*
  914. * misc.c
  915. */
  916. extern unsigned int rxrpc_max_backlog __read_mostly;
  917. extern unsigned long rxrpc_requested_ack_delay;
  918. extern unsigned long rxrpc_soft_ack_delay;
  919. extern unsigned long rxrpc_idle_ack_delay;
  920. extern unsigned int rxrpc_rx_window_size;
  921. extern unsigned int rxrpc_rx_mtu;
  922. extern unsigned int rxrpc_rx_jumbo_max;
  923. extern unsigned long rxrpc_resend_timeout;
  924. extern const s8 rxrpc_ack_priority[];
  925. /*
  926. * net_ns.c
  927. */
  928. extern unsigned int rxrpc_net_id;
  929. extern struct pernet_operations rxrpc_net_ops;
/*
 * Get the rxrpc per-network-namespace data for the given namespace.
 */
static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}
  934. /*
  935. * output.c
  936. */
  937. int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
  938. int rxrpc_send_abort_packet(struct rxrpc_call *);
  939. int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
  940. void rxrpc_reject_packets(struct rxrpc_local *);
  941. void rxrpc_send_keepalive(struct rxrpc_peer *);
  942. /*
  943. * peer_event.c
  944. */
  945. void rxrpc_error_report(struct sock *);
  946. void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
  947. rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
  948. void rxrpc_peer_keepalive_worker(struct work_struct *);
  949. /*
  950. * peer_object.c
  951. */
  952. struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
  953. const struct sockaddr_rxrpc *);
  954. struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
  955. struct sockaddr_rxrpc *, gfp_t);
  956. struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
  957. void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
  958. struct rxrpc_peer *);
  959. void rxrpc_destroy_all_peers(struct rxrpc_net *);
  960. struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
  961. struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
  962. void rxrpc_put_peer(struct rxrpc_peer *);
  963. void rxrpc_put_peer_locked(struct rxrpc_peer *);
  964. /*
  965. * proc.c
  966. */
  967. extern const struct seq_operations rxrpc_call_seq_ops;
  968. extern const struct seq_operations rxrpc_connection_seq_ops;
  969. /*
  970. * recvmsg.c
  971. */
  972. void rxrpc_notify_socket(struct rxrpc_call *);
  973. int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
  974. /*
  975. * rxkad.c
  976. */
  977. #ifdef CONFIG_RXKAD
  978. extern const struct rxrpc_security rxkad;
  979. #endif
  980. /*
  981. * security.c
  982. */
  983. int __init rxrpc_init_security(void);
  984. void rxrpc_exit_security(void);
  985. int rxrpc_init_client_conn_security(struct rxrpc_connection *);
  986. int rxrpc_init_server_conn_security(struct rxrpc_connection *);
  987. /*
  988. * sendmsg.c
  989. */
  990. int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
  991. /*
  992. * skbuff.c
  993. */
  994. void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
  995. void rxrpc_packet_destructor(struct sk_buff *);
  996. void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
  997. void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
  998. void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
  999. void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
  1000. void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
  1001. void rxrpc_purge_queue(struct sk_buff_head *);
  1002. /*
  1003. * sysctl.c
  1004. */
  1005. #ifdef CONFIG_SYSCTL
  1006. extern int __init rxrpc_sysctl_init(void);
  1007. extern void rxrpc_sysctl_exit(void);
  1008. #else
  1009. static inline int __init rxrpc_sysctl_init(void) { return 0; }
  1010. static inline void rxrpc_sysctl_exit(void) {}
  1011. #endif
  1012. /*
  1013. * utils.c
  1014. */
  1015. int rxrpc_extract_addr_from_skb(struct rxrpc_local *, struct sockaddr_rxrpc *,
  1016. struct sk_buff *);
  1017. static inline bool before(u32 seq1, u32 seq2)
  1018. {
  1019. return (s32)(seq1 - seq2) < 0;
  1020. }
  1021. static inline bool before_eq(u32 seq1, u32 seq2)
  1022. {
  1023. return (s32)(seq1 - seq2) <= 0;
  1024. }
  1025. static inline bool after(u32 seq1, u32 seq2)
  1026. {
  1027. return (s32)(seq1 - seq2) > 0;
  1028. }
  1029. static inline bool after_eq(u32 seq1, u32 seq2)
  1030. {
  1031. return (s32)(seq1 - seq2) >= 0;
  1032. }
/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;	/* Category mask consulted by the CONFIG_AF_RXRPC_DEBUG variants below */

/* Core debug printer: prefix each message with the current task's comm. */
#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

/* Unconditional category printers used by the _enter/_leave/... wrappers. */
#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
/* Compile-time debugging: always emit every category. */
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
/* Runtime-selectable debugging: each category is gated on a bit in rxrpc_debug. */
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
/* Debugging disabled: no_printk() typechecks the arguments but emits nothing. */
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk(" "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

/* Oops the kernel via BUG() if expression X is false. */
#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

/*
 * BUG() unless (X OP Y) holds.  Y is cast to X's type, so the comparison is
 * performed in X's type; both values are printed as unsigned long in decimal
 * and hex for diagnosis.
 */
#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

/* BUG() if condition C holds but expression X is false. */
#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

/* BUG() if condition C holds but (X OP Y) does not; see ASSERTCMP. */
#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else
/* Assertions compiled out: each expands to an empty statement. */
#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)
#endif /* __KDEBUGALL */