diag.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * vsock sock_diag(7) module
 *
 * Copyright (C) 2017 Red Hat, Inc.
 * Author: Stefan Hajnoczi <stefanha@redhat.com>
 */

#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/vm_sockets_diag.h>
#include <net/af_vsock.h>

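/* Fill one struct vsock_diag_msg describing @sk into the dump skb.  Returns
 * -EMSGSIZE when the skb has no room left, so the caller can stop and resume
 * the dump in a later callback.
 */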
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			u32 portid, u32 seq, u32 flags)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_diag_msg *rep;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->vdiag_family = AF_VSOCK;

	/* Lock order dictates that sk_lock is acquired before
	 * vsock_table_lock, so we cannot lock here.  Simply don't take
	 * sk_lock; sk is guaranteed to stay alive since vsock_table_lock is
	 * held.
	 */
	rep->vdiag_type = sk->sk_type;
	rep->vdiag_state = sk->sk_state;
	rep->vdiag_shutdown = sk->sk_shutdown;
	rep->vdiag_src_cid = vsk->local_addr.svm_cid;
	rep->vdiag_src_port = vsk->local_addr.svm_port;
	rep->vdiag_dst_cid = vsk->remote_addr.svm_cid;
	rep->vdiag_dst_port = vsk->remote_addr.svm_port;
	rep->vdiag_ino = sock_i_ino(sk);

	sock_diag_save_cookie(sk, rep->vdiag_cookie);

	return 0;
}

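/* Netlink dump callback: walk the global bind and connected hash tables and
 * emit one vsock_diag_msg per socket that matches the requested state mask.
 * The current table, bucket and in-bucket index are stashed in cb->args[] so
 * an interrupted dump can resume where it left off on the next invocation.
 */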
static int vsock_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct vsock_diag_req *req;
	struct vsock_sock *vsk;
	unsigned int bucket;
	unsigned int last_i;
	unsigned int table;
	struct net *net;
	unsigned int i;

	req = nlmsg_data(cb->nlh);
	net = sock_net(skb->sk);

	/* State saved between calls: */
	table = cb->args[0];
	bucket = cb->args[1];
	i = last_i = cb->args[2];

	/* TODO VMCI pending sockets? */

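	/* Holding vsock_table_lock keeps every socket in the tables alive
	 * while it is being filled in (see sk_diag_fill() above), so no
	 * per-socket references are taken during the walk.
	 */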
	spin_lock_bh(&vsock_table_lock);

	/* Bind table (locally created sockets) */
	if (table == 0) {
		while (bucket < ARRAY_SIZE(vsock_bind_table)) {
			struct list_head *head = &vsock_bind_table[bucket];

			i = 0;
			list_for_each_entry(vsk, head, bound_table) {
				struct sock *sk = sk_vsock(vsk);

				if (!net_eq(sock_net(sk), net))
					continue;
				if (i < last_i)
					goto next_bind;
				if (!(req->vdiag_states & (1 << sk->sk_state)))
					goto next_bind;
				if (sk_diag_fill(sk, skb,
						 NETLINK_CB(cb->skb).portid,
						 cb->nlh->nlmsg_seq,
						 NLM_F_MULTI) < 0)
					goto done;
next_bind:
				i++;
			}
			last_i = 0;
			bucket++;
		}

		table++;
		bucket = 0;
	}

	/* Connected table (accepted connections) */
	while (bucket < ARRAY_SIZE(vsock_connected_table)) {
		struct list_head *head = &vsock_connected_table[bucket];

		i = 0;
		list_for_each_entry(vsk, head, connected_table) {
			struct sock *sk = sk_vsock(vsk);

			/* Skip sockets we've already seen above */
			if (__vsock_in_bound_table(vsk))
				continue;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (i < last_i)
				goto next_connected;
			if (!(req->vdiag_states & (1 << sk->sk_state)))
				goto next_connected;
			if (sk_diag_fill(sk, skb,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next_connected:
			i++;
		}
		last_i = 0;
		bucket++;
	}

done:
	spin_unlock_bh(&vsock_table_lock);

	cb->args[0] = table;
	cb->args[1] = bucket;
	cb->args[2] = i;

	return skb->len;
}

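/* Entry point for SOCK_DIAG_BY_FAMILY requests for AF_VSOCK: validate the
 * request length and start a netlink dump.  Only NLM_F_DUMP requests are
 * supported; anything else gets -EOPNOTSUPP.
 */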
static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct vsock_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = vsock_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	}

	return -EOPNOTSUPP;
}

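/* Hook this module's dump handler into the sock_diag core for AF_VSOCK. */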
static const struct sock_diag_handler vsock_diag_handler = {
	.owner = THIS_MODULE,
	.family = AF_VSOCK,
	.dump = vsock_diag_handler_dump,
};

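/* Module load/unload simply registers and unregisters the handler above. */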
static int __init vsock_diag_init(void)
{
	return sock_diag_register(&vsock_diag_handler);
}

static void __exit vsock_diag_exit(void)
{
	sock_diag_unregister(&vsock_diag_handler);
}

module_init(vsock_diag_init);
module_exit(vsock_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMware Virtual Sockets monitoring via SOCK_DIAG");
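
/* Allow automatic module loading when userspace sends a sock_diag request
 * for address family 40 (AF_VSOCK).
 */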
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG,
			       40 /* AF_VSOCK */);
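
/*
 * Userspace usage sketch (not part of this module): open an AF_NETLINK,
 * NETLINK_SOCK_DIAG socket and send a SOCK_DIAG_BY_FAMILY request flagged
 * NLM_F_DUMP whose payload is a struct vsock_diag_req with the family set to
 * AF_VSOCK and vdiag_states selecting the socket states of interest (~0U for
 * all).  The reply is a multipart stream of struct vsock_diag_msg records as
 * filled in by sk_diag_fill() above.  Recent iproute2 "ss" can issue such
 * requests for the vsock family.
 */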