// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

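/* Illustrative user-space sketch (not part of this file): the setsockopt()
 * below is what ultimately places a listener into the reuseport group
 * managed here. The port number and error handling are placeholder choices;
 * only the socket(), setsockopt(SO_REUSEPORT), bind() and listen() calls are
 * the real API.
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int reuseport_listener(void)
 *	{
 *		struct sockaddr_in addr = {
 *			.sin_family = AF_INET,
 *			.sin_port = htons(8080),		// placeholder port
 *			.sin_addr.s_addr = htonl(INADDR_ANY),
 *		};
 *		int one = 1;
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		// Every socket in the group must set SO_REUSEPORT before bind().
 *		if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
 *		    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
 *		    listen(fd, 128))
 *			return -1;
 *		return fd;	// call once per worker to grow the group
 *	}
 */
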
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);

	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the new socket is bound to a wildcard
 *               (INADDR_ANY) local address.
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);

	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

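/* Illustrative sketch of how a protocol's bind/hash path is expected to use
 * the two entry points above. lookup_bound_sock() is a hypothetical stand-in
 * for the protocol's own search of its bind table (roughly what the TCP/UDP
 * port-hashing code does before calling in here).
 *
 *	static int example_join_group(struct sock *sk, bool bind_inany)
 *	{
 *		struct sock *sk2 = lookup_bound_sock(sk);	// hypothetical helper
 *
 *		if (sk2)
 *			// Join (or create) the group sk2 belongs to.
 *			return reuseport_add_sock(sk, sk2, bind_inany);
 *
 *		// First socket on this port: start a new group.
 *		return reuseport_alloc(sk, bind_inany);
 *	}
 */
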
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sk in this reuseport group is added to
	 * a bpf map.  Notify the bpf side.  The bpf map logic will
	 * remove the sk if it is indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

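/* Illustrative user-space sketch (not part of this file): a classic BPF
 * program attached with SO_ATTACH_REUSEPORT_CBPF returns an index into the
 * socks[] array, which is exactly what run_bpf_filter() consumes above.
 * This example steers each packet to the socket whose index equals the
 * current CPU; it assumes the group was created with one listener per CPU.
 *
 *	#include <linux/filter.h>
 *	#include <sys/socket.h>
 *
 *	static int attach_cpu_steering(int fd)
 *	{
 *		struct sock_filter code[] = {
 *			// A = current CPU number (ancillary load)
 *			BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU),
 *			// return A, used as index into the reuseport group
 *			BPF_STMT(BPF_RET | BPF_A, 0),
 *		};
 *		struct sock_fprog prog = {
 *			.len = sizeof(code) / sizeof(code[0]),
 *			.filter = code,
 *		};
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *				  &prog, sizeof(prog));
 *	}
 */
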
/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2) {
			int i, j;

			i = j = reciprocal_scale(hash, socks);
			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
				i++;
				if (i >= socks)
					i = 0;
				if (i == j)
					goto out;
			}
			sk2 = reuse->socks[i];
		}
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

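/* Note on the hash fallback above: reciprocal_scale() maps the 32-bit flow
 * hash onto the range [0, socks) without a division, roughly
 *
 *	index = ((u64)hash * socks) >> 32;
 *
 * so flows stay spread across the group in proportion to its size, and the
 * while loop only walks forward past slots already holding a connected
 * (TCP_ESTABLISHED) socket.
 */
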
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);
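
/* Illustrative user-space sketch (not part of this file): attaching an eBPF
 * selector, which ends up in reuseport_attach_prog() above, is done by
 * passing the program's fd through SO_ATTACH_REUSEPORT_EBPF. Loading the
 * program itself (prog_fd below) is assumed to happen elsewhere, e.g. via
 * bpf(BPF_PROG_LOAD, ...) or a libbpf helper.
 *
 *	#include <sys/socket.h>
 *
 *	static int attach_ebpf_selector(int fd, int prog_fd)
 *	{
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *				  &prog_fd, sizeof(prog_fd));
 *	}
 */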