// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
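
/*
 * Rough userspace sketch (not part of this file) of how a reuseport
 * group is typically formed: each socket sets SO_REUSEPORT before
 * bind(), and a BPF program can optionally be attached to steer
 * incoming packets. addr, backlog and prog_fd are assumed to be set
 * up elsewhere; error handling is omitted.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, backlog);
 *	// optionally steer packets with an eBPF program:
 *	// setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *	//	      &prog_fd, sizeof(prog_fd));
 */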

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);
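
/* Lazily allocate a stable, non-zero ID for this reuseport group from
 * reuseport_ida. A non-zero ID marks a group that may be referenced
 * from a BPF reuseport map. Called with reuseport_lock held, hence
 * GFP_ATOMIC.
 */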
int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}
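
/* Allocate a zeroed sock_reuseport with room for max_socks socket
 * pointers in the trailing socks[] array. GFP_ATOMIC because callers
 * hold reuseport_lock.
 */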
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;
	RCU_INIT_POINTER(reuse->prog, NULL);

	return reuse;
}
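
/**
 * reuseport_alloc - Create a reuseport group containing only @sk.
 * @sk: Socket to become the first member of the group.
 * @bind_inany: Whether or not @sk is bound to a local INADDR_ANY address.
 *
 * If a concurrent caller already attached a group to @sk, only the
 * bind_inany flag is (possibly) updated. Returns 0 on success or
 * -ENOMEM if the group could not be allocated.
 */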
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);
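
/* Double the capacity of the group's socket array (capped at U16_MAX
 * entries), copy the existing state over, and repoint every member's
 * sk_reuseport_cb at the new structure. The old structure is freed
 * after an RCU grace period. Called with reuseport_lock held.
 */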
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);

	return more_reuse;
}
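
/* RCU callback that tears down a group once no reader can still see
 * it: drop the BPF program reference, release the group's IDA id (if
 * one was allocated), and free the structure itself.
 */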
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INADDR_ANY
 *   address.
 *
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);

	return 0;
}
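
/* Remove @sk from its reuseport group: clear sk->sk_reuseport_cb,
 * replace the socket's slot in socks[] with the last entry, and free
 * the group via RCU once the last member is gone. If the group has a
 * reuseport_id, the BPF side is notified first so any map entry
 * pointing at @sk can be removed.
 */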
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sk in this reuseport group is added to
	 * a bpf map. Notify the bpf side. The bpf map logic will
	 * remove the sk if it is indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);
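
/* Run a classic (socket-filter) BPF program to pick a member socket.
 * The program expects to see the packet payload, so the skb data
 * pointer is temporarily advanced past the protocol header; a shared
 * skb is cloned first so the adjustment cannot disturb other users.
 * The program's return value is taken as an index into socks[]; an
 * out-of-range index means no selection was made.
 */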
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data. If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2) {
			int i, j;

			i = j = reciprocal_scale(hash, socks);
			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
				i++;
				if (i >= socks)
					i = 0;
				if (i == j)
					goto out;
			}
			sk2 = reuse->socks[i];
		}
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
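
/* Attach @prog to @sk's reuseport group (allocating the group first if
 * @sk is an unhashed SO_REUSEPORT socket), replacing and freeing any
 * previously attached program. Fails with -EINVAL if the socket was
 * not bound with SO_REUSEPORT.
 */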
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);