// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
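
/*
 * Illustrative userspace sketch (not part of this file): a reuseport
 * group is formed by setting SO_REUSEPORT on each socket before bind(),
 * then binding them all to the same address/port:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 128);
 *
 * Each such socket lands in the socks[] array managed below.
 */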

#include <net/ip.h>
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);

static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
                               struct sock_reuseport *reuse, bool bind_inany);

void reuseport_has_conns_set(struct sock *sk)
{
        struct sock_reuseport *reuse;

        if (!rcu_access_pointer(sk->sk_reuseport_cb))
                return;

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        if (likely(reuse))
                reuse->has_conns = 1;
        spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_has_conns_set);
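
/* has_conns is set once any socket in a UDP reuseport group has been
 * connect()ed. From then on, lookups cannot short-circuit on the first
 * reuseport match, because a connected socket may score higher for a
 * given 4-tuple than the one the hash would pick.
 */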

static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)
{
        /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
        WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);
}

static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)
{
        /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
        WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1);
}

static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
{
        if (sk->sk_incoming_cpu >= 0)
                __reuseport_get_incoming_cpu(reuse);
}

static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
{
        if (sk->sk_incoming_cpu >= 0)
                __reuseport_put_incoming_cpu(reuse);
}

void reuseport_update_incoming_cpu(struct sock *sk, int val)
{
        struct sock_reuseport *reuse;
        int old_sk_incoming_cpu;

        if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) {
                /* Paired with READ_ONCE() in sk_incoming_cpu_update()
                 * and compute_score().
                 */
                WRITE_ONCE(sk->sk_incoming_cpu, val);
                return;
        }

        spin_lock_bh(&reuseport_lock);

        /* This must be done under reuseport_lock to avoid a race with
         * reuseport_grow(), which accesses sk->sk_incoming_cpu without
         * lock_sock() when detaching a shutdown()ed sk.
         *
         * Paired with READ_ONCE() in reuseport_select_sock_by_hash().
         */
        old_sk_incoming_cpu = sk->sk_incoming_cpu;
        WRITE_ONCE(sk->sk_incoming_cpu, val);

        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* reuseport_grow() has detached a closed sk. */
        if (!reuse)
                goto out;

        if (old_sk_incoming_cpu < 0 && val >= 0)
                __reuseport_get_incoming_cpu(reuse);
        else if (old_sk_incoming_cpu >= 0 && val < 0)
                __reuseport_put_incoming_cpu(reuse);

out:
        spin_unlock_bh(&reuseport_lock);
}
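
/* An illustrative way to drive the function above from userspace (not
 * part of this file):
 *
 *	int cpu = 2;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu));
 *
 * reuse->incoming_cpu counts how many sockets in the group have a CPU
 * preference set, letting the hot path skip the per-socket check when
 * it is zero.
 */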

static int reuseport_sock_index(struct sock *sk,
                                const struct sock_reuseport *reuse,
                                bool closed)
{
        int left, right;

        if (!closed) {
                left = 0;
                right = reuse->num_socks;
        } else {
                left = reuse->max_socks - reuse->num_closed_socks;
                right = reuse->max_socks;
        }

        for (; left < right; left++)
                if (reuse->socks[left] == sk)
                        return left;
        return -1;
}

static void __reuseport_add_sock(struct sock *sk,
                                 struct sock_reuseport *reuse)
{
        reuse->socks[reuse->num_socks] = sk;
        /* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
        smp_wmb();
        reuse->num_socks++;
        reuseport_get_incoming_cpu(sk, reuse);
}

static bool __reuseport_detach_sock(struct sock *sk,
                                    struct sock_reuseport *reuse)
{
        int i = reuseport_sock_index(sk, reuse, false);

        if (i == -1)
                return false;

        reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
        reuse->num_socks--;
        reuseport_put_incoming_cpu(sk, reuse);

        return true;
}

static void __reuseport_add_closed_sock(struct sock *sk,
                                        struct sock_reuseport *reuse)
{
        reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
        /* paired with READ_ONCE() in inet_csk_bind_conflict() */
        WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
        reuseport_get_incoming_cpu(sk, reuse);
}

static bool __reuseport_detach_closed_sock(struct sock *sk,
                                           struct sock_reuseport *reuse)
{
        int i = reuseport_sock_index(sk, reuse, true);

        if (i == -1)
                return false;

        reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
        /* paired with READ_ONCE() in inet_csk_bind_conflict() */
        WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
        reuseport_put_incoming_cpu(sk, reuse);

        return true;
}
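
/* Layout of the socks[] array maintained by the helpers above:
 *
 *	[0 .. num_socks-1]                          listening (live) socks
 *	[num_socks .. max_socks-num_closed_socks-1] unused gap
 *	[max_socks-num_closed_socks .. max_socks-1] closed (shutdown()ed) socks
 *
 * Live sockets grow from the head and closed sockets from the tail, so
 * either section can be compacted with a swap-with-last-element trick
 * without disturbing the other.
 */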

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
        struct sock_reuseport *reuse;

        reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC);
        if (!reuse)
                return NULL;

        reuse->max_socks = max_socks;

        RCU_INIT_POINTER(reuse->prog, NULL);

        return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
        struct sock_reuseport *reuse;
        int id, ret = 0;

        /* bh lock used since this function call may precede hlist lock in
         * soft irq of receive path or setsockopt from process context
         */
        spin_lock_bh(&reuseport_lock);

        /* Allocation attempts can occur concurrently via the setsockopt path
         * and the bind/hash path. Nothing to do when we lose the race.
         */
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        if (reuse) {
                if (reuse->num_closed_socks) {
                        /* sk was shutdown()ed before */
                        ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
                        goto out;
                }

                /* Only set reuse->bind_inany if bind_inany is true;
                 * otherwise we would overwrite the value already set
                 * by the bind/hash path.
                 */
                if (bind_inany)
                        reuse->bind_inany = bind_inany;
                goto out;
        }

        reuse = __reuseport_alloc(INIT_SOCKS);
        if (!reuse) {
                ret = -ENOMEM;
                goto out;
        }

        id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
        if (id < 0) {
                kfree(reuse);
                ret = id;
                goto out;
        }

        reuse->reuseport_id = id;
        reuse->bind_inany = bind_inany;
        reuse->socks[0] = sk;
        reuse->num_socks = 1;
        reuseport_get_incoming_cpu(sk, reuse);
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
        spin_unlock_bh(&reuseport_lock);

        return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
        struct sock_reuseport *more_reuse;
        u32 more_socks_size, i;

        more_socks_size = reuse->max_socks * 2U;
        if (more_socks_size > U16_MAX) {
                if (reuse->num_closed_socks) {
                        /* Make room by removing a closed sk.
                         * The child has already been migrated.
                         * Only reqsk left at this point.
                         */
                        struct sock *sk;

                        sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
                        RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
                        __reuseport_detach_closed_sock(sk, reuse);

                        return reuse;
                }

                return NULL;
        }

        more_reuse = __reuseport_alloc(more_socks_size);
        if (!more_reuse)
                return NULL;

        more_reuse->num_socks = reuse->num_socks;
        more_reuse->num_closed_socks = reuse->num_closed_socks;
        more_reuse->prog = reuse->prog;
        more_reuse->reuseport_id = reuse->reuseport_id;
        more_reuse->bind_inany = reuse->bind_inany;
        more_reuse->has_conns = reuse->has_conns;
        more_reuse->incoming_cpu = reuse->incoming_cpu;

        memcpy(more_reuse->socks, reuse->socks,
               reuse->num_socks * sizeof(struct sock *));
        memcpy(more_reuse->socks +
               (more_reuse->max_socks - more_reuse->num_closed_socks),
               reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
               reuse->num_closed_socks * sizeof(struct sock *));
        more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

        for (i = 0; i < reuse->max_socks; ++i)
                rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
                                   more_reuse);

        /* Note: we use kfree_rcu here instead of reuseport_free_rcu so
         * that reuse and more_reuse can temporarily share a reference
         * to prog.
         */
        kfree_rcu(reuse, rcu);
        return more_reuse;
}
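
/* The array doubles on each grow, so starting from INIT_SOCKS the
 * capacity runs 128, 256, 512, ... and stops once max_socks * 2 would
 * exceed U16_MAX, since num_socks and num_closed_socks are u16 counters.
 */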

static void reuseport_free_rcu(struct rcu_head *head)
{
        struct sock_reuseport *reuse;

        reuse = container_of(head, struct sock_reuseport, rcu);
        sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
        ida_free(&reuseport_ida, reuse->reuseport_id);
        kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 * May return -ENOMEM and not add the socket to the group under memory
 * pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
        struct sock_reuseport *old_reuse, *reuse;

        if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
                int err = reuseport_alloc(sk2, bind_inany);

                if (err)
                        return err;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                              lockdep_is_held(&reuseport_lock));
        if (old_reuse && old_reuse->num_closed_socks) {
                /* sk was shutdown()ed before */
                int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);

                spin_unlock_bh(&reuseport_lock);
                return err;
        }

        if (old_reuse && old_reuse->num_socks != 1) {
                spin_unlock_bh(&reuseport_lock);
                return -EBUSY;
        }

        if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
                reuse = reuseport_grow(reuse);
                if (!reuse) {
                        spin_unlock_bh(&reuseport_lock);
                        return -ENOMEM;
                }
        }

        __reuseport_add_sock(sk, reuse);
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

        spin_unlock_bh(&reuseport_lock);

        if (old_reuse)
                call_rcu(&old_reuse->rcu, reuseport_free_rcu);

        return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
                               struct sock_reuseport *reuse, bool bind_inany)
{
        if (old_reuse == reuse) {
                /* If sk was in the same reuseport group, just pop sk out of
                 * the closed section and push sk into the listening section.
                 */
                __reuseport_detach_closed_sock(sk, old_reuse);
                __reuseport_add_sock(sk, old_reuse);
                return 0;
        }

        if (!reuse) {
                /* In bind()/listen() path, we cannot carry over the eBPF prog
                 * for the shutdown()ed socket. In setsockopt() path, we should
                 * not change the eBPF prog of listening sockets by attaching a
                 * prog to the shutdown()ed socket. Thus, we will allocate a new
                 * reuseport group and detach sk from the old group.
                 */
                int id;

                reuse = __reuseport_alloc(INIT_SOCKS);
                if (!reuse)
                        return -ENOMEM;

                id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
                if (id < 0) {
                        kfree(reuse);
                        return id;
                }

                reuse->reuseport_id = id;
                reuse->bind_inany = bind_inany;
        } else {
                /* Move sk from the old group to the new one if
                 * - all the other listeners in the old group were close()d or
                 *   shutdown()ed, and then sk2 has listen()ed on the same port
                 * OR
                 * - sk listen()ed without bind() (or with autobind), was
                 *   shutdown()ed, and then listen()s on another port which
                 *   sk2 listen()s on.
                 */
                if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
                        reuse = reuseport_grow(reuse);
                        if (!reuse)
                                return -ENOMEM;
                }
        }

        __reuseport_detach_closed_sock(sk, old_reuse);
        __reuseport_add_sock(sk, reuse);
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

        if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
                call_rcu(&old_reuse->rcu, reuseport_free_rcu);

        return 0;
}

void reuseport_detach_sock(struct sock *sk)
{
        struct sock_reuseport *reuse;

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* reuseport_grow() has detached a closed sk */
        if (!reuse)
                goto out;

        /* Notify the bpf side. The sk may be added to a sockarray
         * map. If so, sockarray logic will remove it from the map.
         *
         * Other bpf map types that work with reuseport, like sockmap,
         * don't need an explicit callback from here. They override sk
         * unhash/close ops to remove the sk from the map before we
         * get to this point.
         */
        bpf_sk_reuseport_detach(sk);

        rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

        if (!__reuseport_detach_closed_sock(sk, reuse))
                __reuseport_detach_sock(sk, reuse);

        if (reuse->num_socks + reuse->num_closed_socks == 0)
                call_rcu(&reuse->rcu, reuseport_free_rcu);

out:
        spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

void reuseport_stop_listen_sock(struct sock *sk)
{
        if (sk->sk_protocol == IPPROTO_TCP) {
                struct sock_reuseport *reuse;
                struct bpf_prog *prog;

                spin_lock_bh(&reuseport_lock);

                reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                                  lockdep_is_held(&reuseport_lock));
                prog = rcu_dereference_protected(reuse->prog,
                                                 lockdep_is_held(&reuseport_lock));

                if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
                    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
                        /* Migration capable, move sk from the listening section
                         * to the closed section.
                         */
                        bpf_sk_reuseport_detach(sk);

                        __reuseport_detach_sock(sk, reuse);
                        __reuseport_add_closed_sock(sk, reuse);

                        spin_unlock_bh(&reuseport_lock);
                        return;
                }

                spin_unlock_bh(&reuseport_lock);
        }

        /* Not capable of migration; detach immediately. */
        reuseport_detach_sock(sk);
}
EXPORT_SYMBOL(reuseport_stop_listen_sock);

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
                                   struct bpf_prog *prog, struct sk_buff *skb,
                                   int hdr_len)
{
        struct sk_buff *nskb = NULL;
        u32 index;

        if (skb_shared(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return NULL;
                skb = nskb;
        }

        /* temporarily advance data past protocol header */
        if (!pskb_pull(skb, hdr_len)) {
                kfree_skb(nskb);
                return NULL;
        }
        index = bpf_prog_run_save_cb(prog, skb);
        __skb_push(skb, hdr_len);

        consume_skb(nskb);

        if (index >= socks)
                return NULL;

        return reuse->socks[index];
}
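
/* run_bpf_filter() serves classic BPF programs attached with
 * SO_ATTACH_REUSEPORT_CBPF. Illustrative userspace sketch (not part of
 * this file): a two-instruction filter that returns the current CPU
 * number as the socket index, so traffic stays CPU-local:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD  | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *	struct sock_fprog fprog = { .len = 2, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 *
 * A returned index >= socks is treated as invalid here, and the caller
 * falls back to hash-based selection.
 */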

static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
                                                  u32 hash, u16 num_socks)
{
        struct sock *first_valid_sk = NULL;
        int i, j;

        i = j = reciprocal_scale(hash, num_socks);
        do {
                struct sock *sk = reuse->socks[i];

                if (sk->sk_state != TCP_ESTABLISHED) {
                        /* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */
                        if (!READ_ONCE(reuse->incoming_cpu))
                                return sk;

                        /* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */
                        if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                                return sk;

                        if (!first_valid_sk)
                                first_valid_sk = sk;
                }

                i++;
                if (i >= num_socks)
                        i = 0;
        } while (i != j);

        return first_valid_sk;
}
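
/* reciprocal_scale() maps the 32-bit hash onto [0, num_socks) without a
 * division:
 *
 *	reciprocal_scale(hash, n) == (u32)(((u64)hash * n) >> 32)
 *
 * The loop above then walks the listening section circularly from that
 * starting slot.
 */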

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data. If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
                                   u32 hash,
                                   struct sk_buff *skb,
                                   int hdr_len)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *prog;
        struct sock *sk2 = NULL;
        u16 socks;

        rcu_read_lock();
        reuse = rcu_dereference(sk->sk_reuseport_cb);

        /* if memory allocation failed or add call is not yet complete */
        if (!reuse)
                goto out;

        prog = rcu_dereference(reuse->prog);
        socks = READ_ONCE(reuse->num_socks);
        if (likely(socks)) {
                /* paired with smp_wmb() in __reuseport_add_sock() */
                smp_rmb();

                if (!prog || !skb)
                        goto select_by_hash;

                if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
                        sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
                else
                        sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
                /* no bpf or invalid bpf result: fall back to hash usage */
                if (!sk2)
                        sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
        }

out:
        rcu_read_unlock();
        return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

/**
 * reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: close()ed or shutdown()ed socket in the group.
 * @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
 *   NEW_SYN_RECV request socket during 3WHS.
 * @skb: skb to run through BPF filter.
 * Returns a socket (with sk_refcnt +1) that should accept the child socket
 * (or NULL on error).
 */
struct sock *reuseport_migrate_sock(struct sock *sk,
                                    struct sock *migrating_sk,
                                    struct sk_buff *skb)
{
        struct sock_reuseport *reuse;
        struct sock *nsk = NULL;
        bool allocated = false;
        struct bpf_prog *prog;
        u16 socks;
        u32 hash;

        rcu_read_lock();

        reuse = rcu_dereference(sk->sk_reuseport_cb);
        if (!reuse)
                goto out;

        socks = READ_ONCE(reuse->num_socks);
        if (unlikely(!socks))
                goto failure;

        /* paired with smp_wmb() in __reuseport_add_sock() */
        smp_rmb();

        hash = migrating_sk->sk_hash;
        prog = rcu_dereference(reuse->prog);
        if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
                if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
                        goto select_by_hash;
                goto failure;
        }

        if (!skb) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        goto failure;
                allocated = true;
        }

        nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);

        if (allocated)
                kfree_skb(skb);

select_by_hash:
        if (!nsk)
                nsk = reuseport_select_sock_by_hash(reuse, hash, socks);

        if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
                nsk = NULL;
                goto failure;
        }

out:
        rcu_read_unlock();
        return nsk;

failure:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
        goto out;
}
EXPORT_SYMBOL(reuseport_migrate_sock);
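
/* Request migration is enabled either per netns via the sysctl, e.g.
 *
 *	sysctl -w net.ipv4.tcp_migrate_req=1
 *
 * or by attaching a BPF_SK_REUSEPORT_SELECT_OR_MIGRATE program, which
 * can inspect migrating_sk and pick the target listener itself.
 */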

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        if (sk_unhashed(sk)) {
                int err;

                if (!sk->sk_reuseport)
                        return -EINVAL;

                err = reuseport_alloc(sk, false);
                if (err)
                        return err;
        } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
                /* The socket wasn't bound with SO_REUSEPORT */
                return -EINVAL;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_prog = rcu_dereference_protected(reuse->prog,
                                             lockdep_is_held(&reuseport_lock));
        rcu_assign_pointer(reuse->prog, prog);
        spin_unlock_bh(&reuseport_lock);

        sk_reuseport_prog_free(old_prog);
        return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);
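
/* Illustrative userspace attach (not part of this file), assuming
 * prog_fd refers to a loaded BPF_PROG_TYPE_SK_REUSEPORT program:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 *
 * Plain SK_REUSEPORT programs are consulted in reuseport_select_sock();
 * SELECT_OR_MIGRATE programs are also consulted in reuseport_migrate_sock().
 */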

int reuseport_detach_prog(struct sock *sk)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        old_prog = NULL;
        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* reuse must be checked after acquiring the reuseport_lock
         * because reuseport_grow() can detach a closed sk.
         */
        if (!reuse) {
                spin_unlock_bh(&reuseport_lock);
                return sk->sk_reuseport ? -ENOENT : -EINVAL;
        }

        if (sk_unhashed(sk) && reuse->num_closed_socks) {
                spin_unlock_bh(&reuseport_lock);
                return -ENOENT;
        }

        old_prog = rcu_replace_pointer(reuse->prog, old_prog,
                                       lockdep_is_held(&reuseport_lock));
        spin_unlock_bh(&reuseport_lock);

        if (!old_prog)
                return -ENOENT;

        sk_reuseport_prog_free(old_prog);
        return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);