/* vsock_bpf.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2022 Bobby Eshleman <bobby.eshleman@bytedance.com>
  3. *
  4. * Based off of net/unix/unix_bpf.c
  5. */
  6. #include <linux/bpf.h>
  7. #include <linux/module.h>
  8. #include <linux/skmsg.h>
  9. #include <linux/socket.h>
  10. #include <linux/wait.h>
  11. #include <net/af_vsock.h>
  12. #include <net/sock.h>
/* True if anything is queued for @__sk: data on the socket's own receive
 * queue, or on either of the psock's ingress queues (skb or msg).
 */
#define vsock_sk_has_data(__sk, __psock)				\
	({	!skb_queue_empty(&(__sk)->sk_receive_queue) ||		\
		!skb_queue_empty(&(__psock)->ingress_skb) ||		\
		!list_empty(&(__psock)->ingress_msg);			\
	})
/* The base proto last used to build vsock_bpf_prot; written under
 * vsock_prot_lock and published with smp_store_release() (see
 * vsock_bpf_check_needs_rebuild()).
 */
static struct proto *vsock_prot_saved __read_mostly;
static DEFINE_SPINLOCK(vsock_prot_lock);
/* Copy of the base vsock proto with sockmap hooks patched in. */
static struct proto vsock_bpf_prot;
  21. static bool vsock_has_data(struct sock *sk, struct sk_psock *psock)
  22. {
  23. struct vsock_sock *vsk = vsock_sk(sk);
  24. s64 ret;
  25. ret = vsock_connectible_has_data(vsk);
  26. if (ret > 0)
  27. return true;
  28. return vsock_sk_has_data(sk, psock);
  29. }
/* Wait up to @timeo for @sk to become readable (see vsock_has_data()).
 *
 * Returns true if data is available or the socket is shut down for
 * receive, false if the wait ended with nothing to read.
 */
static bool vsock_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo)
{
	bool ret;

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	/* RCV_SHUTDOWN: never block; let the caller re-read and observe
	 * the shutdown state.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return true;

	/* Non-blocking caller (zero timeout): report no data immediately. */
	if (!timeo)
		return false;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	/* Re-check only after the waiter is queued so a wakeup racing with
	 * this check cannot be lost; sleep at most once.
	 */
	ret = vsock_has_data(sk, psock);
	if (!ret) {
		wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		ret = vsock_has_data(sk, psock);
	}

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);

	return ret;
}
  49. static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags)
  50. {
  51. struct socket *sock = sk->sk_socket;
  52. int err;
  53. if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
  54. err = __vsock_connectible_recvmsg(sock, msg, len, flags);
  55. else if (sk->sk_type == SOCK_DGRAM)
  56. err = __vsock_dgram_recvmsg(sock, msg, len, flags);
  57. else
  58. err = -EPROTOTYPE;
  59. return err;
  60. }
/* recvmsg() for a vsock socket under sockmap/sockhash management.
 *
 * Consumes BPF-redirected messages from the psock ingress queue via
 * sk_msg_recvmsg(); whenever the psock queue is empty but the socket
 * itself has data, falls back to the plain vsock path (__vsock_recvmsg()).
 * Returns bytes copied or a negative errno.
 */
static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
			     size_t len, int flags, int *addr_len)
{
	struct sk_psock *psock;
	struct vsock_sock *vsk;
	int copied;

	/* No psock means the socket is not (or no longer) in a map:
	 * use the normal receive path.
	 */
	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return __vsock_recvmsg(sk, msg, len, flags);

	lock_sock(sk);
	vsk = vsock_sk(sk);

	/* No transport assigned yet: nothing can be received. */
	if (!vsk->transport) {
		copied = -ENODEV;
		goto out;
	}

	/* Socket has data but none of it was redirected to the psock:
	 * drop the lock and psock ref, then read it the regular way.
	 */
	if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) {
		release_sock(sk);
		sk_psock_put(sk, psock);
		return __vsock_recvmsg(sk, msg, len, flags);
	}

	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	/* Loop while the psock queue yields nothing, waiting for data. */
	while (copied == 0) {
		long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

		if (!vsock_msg_wait_data(sk, psock, timeo)) {
			copied = -EAGAIN;
			break;
		}

		/* Woken with data, but not on the psock queue: hand off to
		 * the regular receive path (releases lock and psock ref).
		 */
		if (sk_psock_queue_empty(psock)) {
			release_sock(sk);
			sk_psock_put(sk, psock);
			return __vsock_recvmsg(sk, msg, len, flags);
		}

		copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	}

out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
}
  100. static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
  101. {
  102. *prot = *base;
  103. prot->close = sock_map_close;
  104. prot->recvmsg = vsock_bpf_recvmsg;
  105. prot->sock_is_readable = sk_msg_is_readable;
  106. }
/* Rebuild vsock_bpf_prot from @ops if @ops differs from the proto it was
 * last built from. Double-checked locking: a lock-free acquire-load fast
 * path, with the spinlock taken only for the (rare) rebuild.
 */
static void vsock_bpf_check_needs_rebuild(struct proto *ops)
{
	/* Paired with the smp_store_release() below. */
	if (unlikely(ops != smp_load_acquire(&vsock_prot_saved))) {
		spin_lock_bh(&vsock_prot_lock);
		/* Re-check under the lock: another CPU may have rebuilt. */
		if (likely(ops != vsock_prot_saved)) {
			vsock_bpf_rebuild_protos(&vsock_bpf_prot, ops);
			/* Make sure proto function pointers are updated before publishing the
			 * pointer to the struct.
			 */
			smp_store_release(&vsock_prot_saved, ops);
		}
		spin_unlock_bh(&vsock_prot_lock);
	}
}
  122. int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
  123. {
  124. struct vsock_sock *vsk;
  125. if (restore) {
  126. sk->sk_write_space = psock->saved_write_space;
  127. sock_replace_proto(sk, psock->sk_proto);
  128. return 0;
  129. }
  130. vsk = vsock_sk(sk);
  131. if (!vsk->transport)
  132. return -ENODEV;
  133. if (!vsk->transport->read_skb)
  134. return -EOPNOTSUPP;
  135. vsock_bpf_check_needs_rebuild(psock->sk_proto);
  136. sock_replace_proto(sk, &vsock_bpf_prot);
  137. return 0;
  138. }
/* Boot-time init: pre-build the BPF proto from the base vsock_proto. */
void __init vsock_bpf_build_proto(void)
{
	vsock_bpf_rebuild_protos(&vsock_bpf_prot, &vsock_proto);
}