sock_diag.c

/* License: GPL */

#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include <net/sock.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/nospec.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
static struct workqueue_struct *broadcast_wq;

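/*
 * Return the socket's unique 64-bit cookie, assigning one on first use.
 * The cookie is drawn from the per-netns cookie_gen counter and installed
 * with a cmpxchg so that concurrent callers agree on a single value; the
 * loop simply re-reads whichever value won the race.
 */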
u64 sock_gen_cookie(struct sock *sk)
{
	while (1) {
		u64 res = atomic64_read(&sk->sk_cookie);

		if (res)
			return res;
		res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
		atomic64_cmpxchg(&sk->sk_cookie, 0, res);
	}
}

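/*
 * Userspace passes the 64-bit cookie as two __u32 halves.  A pair of
 * INET_DIAG_NOCOOKIE values acts as a wildcard that matches any socket;
 * otherwise a mismatch means the socket the caller looked up has been
 * reused, so report -ESTALE.
 */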
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
{
	u64 res;

	if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
		return 0;

	res = sock_gen_cookie(sk);
	if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
		return -ESTALE;

	return 0;
}
EXPORT_SYMBOL_GPL(sock_diag_check_cookie);

void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
{
	u64 res = sock_gen_cookie(sk);

	cookie[0] = (u32)res;
	cookie[1] = (u32)(res >> 32);
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);

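/*
 * Snapshot the socket's memory accounting counters and emit them as a
 * single SK_MEMINFO_VARS-sized u32 array attribute.
 */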
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
	u32 mem[SK_MEMINFO_VARS];

	sk_get_meminfo(sk, mem);

	return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);

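/*
 * Report the socket's classic BPF filter, if any.  Callers that are not
 * allowed to see the program still get a zero-length attribute.  The
 * filter is read under RCU, and only the saved classic original program
 * (orig_prog) is reported; sockets carrying a pure eBPF program have no
 * orig_prog and report nothing.
 */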
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
			     struct sk_buff *skb, int attrtype)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	struct nlattr *attr;
	unsigned int flen;
	int err = 0;

	if (!may_report_filterinfo) {
		nla_reserve(skb, attrtype, 0);
		return 0;
	}

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (!filter)
		goto out;

	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	flen = bpf_classic_proglen(fprog);

	attr = nla_reserve(skb, attrtype, flen);
	if (attr == NULL) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(nla_data(attr), fprog->filter, flen);
out:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(sock_diag_put_filterinfo);

struct broadcast_sk {
	struct sock *sk;
	struct work_struct work;
};

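/*
 * Worst-case payload estimate for a destroy notification: the base
 * inet_diag_msg plus the INET_DIAG_PROTOCOL and INET_DIAG_INFO
 * (tcp_info) attributes.
 */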
static size_t sock_diag_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
	       + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */
	       + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
}

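/*
 * Deferred half of sock_diag_broadcast_destroy(): runs in process
 * context, asks the family's diag handler to fill in the socket details
 * via ->get_info(), multicasts the result on the family's destroy group,
 * and only then completes the postponed sk_destruct().
 */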
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
	struct broadcast_sk *bsk =
		container_of(work, struct broadcast_sk, work);
	struct sock *sk = bsk->sk;
	const struct sock_diag_handler *hndl;
	struct sk_buff *skb;
	const enum sknetlink_groups group = sock_diag_destroy_group(sk);
	int err = -1;

	WARN_ON(group == SKNLGRP_NONE);

	skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out;

	mutex_lock(&sock_diag_table_mutex);
	hndl = sock_diag_handlers[sk->sk_family];
	if (hndl && hndl->get_info)
		err = hndl->get_info(skb, sk);
	mutex_unlock(&sock_diag_table_mutex);

	if (!err)
		nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
				GFP_KERNEL);
	else
		kfree_skb(skb);
out:
	sk_destruct(sk);
	kfree(bsk);
}

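/*
 * Queue a destroy notification for a dying socket.  The actual message
 * is built from a workqueue because this may run in atomic context; if
 * the small GFP_ATOMIC wrapper allocation fails, the socket is destroyed
 * immediately and no notification is sent.
 */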
void sock_diag_broadcast_destroy(struct sock *sk)
{
	/* Note, this function is often called from an interrupt context. */
	struct broadcast_sk *bsk =
		kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);

	if (!bsk)
		return sk_destruct(sk);
	bsk->sk = sk;
	INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);
	queue_work(broadcast_wq, &bsk->work);
}

void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = fn;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);

void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);

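/*
 * Each address family registers at most one diag handler; the table is
 * protected by sock_diag_table_mutex and a second registration for the
 * same family fails with -EBUSY.
 */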
int sock_diag_register(const struct sock_diag_handler *hndl)
{
	int err = 0;

	if (hndl->family >= AF_MAX)
		return -EINVAL;

	mutex_lock(&sock_diag_table_mutex);
	if (sock_diag_handlers[hndl->family])
		err = -EBUSY;
	else
		sock_diag_handlers[hndl->family] = hndl;
	mutex_unlock(&sock_diag_table_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(sock_diag_register);

void sock_diag_unregister(const struct sock_diag_handler *hnld)
{
	int family = hnld->family;

	if (family >= AF_MAX)
		return;

	mutex_lock(&sock_diag_table_mutex);
	BUG_ON(sock_diag_handlers[family] != hnld);
	sock_diag_handlers[family] = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);

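/*
 * Common handler for SOCK_DIAG_BY_FAMILY and SOCK_DESTROY requests.
 * The family index is clamped with array_index_nospec() so the earlier
 * bounds check cannot be bypassed speculatively (Spectre v1), and the
 * family's diag module is loaded on demand before dispatching to its
 * ->dump() or ->destroy() callback.
 */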
static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int err;
	struct sock_diag_req *req = nlmsg_data(nlh);
	const struct sock_diag_handler *hndl;

	if (nlmsg_len(nlh) < sizeof(*req))
		return -EINVAL;

	if (req->sdiag_family >= AF_MAX)
		return -EINVAL;
	req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);

	if (sock_diag_handlers[req->sdiag_family] == NULL)
		sock_load_diag_module(req->sdiag_family, 0);

	mutex_lock(&sock_diag_table_mutex);
	hndl = sock_diag_handlers[req->sdiag_family];
	if (hndl == NULL)
		err = -ENOENT;
	else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
		err = hndl->dump(skb, nlh);
	else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
		err = hndl->destroy(skb, nlh);
	else
		err = -EOPNOTSUPP;
	mutex_unlock(&sock_diag_table_mutex);

	return err;
}

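/*
 * Top-level netlink message handler.  Legacy TCPDIAG_GETSOCK and
 * DCCPDIAG_GETSOCK requests are routed through the inet compat hook
 * (loading the inet diag module if needed); everything else goes through
 * __sock_diag_cmd().
 */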
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int ret;

	switch (nlh->nlmsg_type) {
	case TCPDIAG_GETSOCK:
	case DCCPDIAG_GETSOCK:
		if (inet_rcv_compat == NULL)
			sock_load_diag_module(AF_INET, 0);

		mutex_lock(&sock_diag_table_mutex);
		if (inet_rcv_compat != NULL)
			ret = inet_rcv_compat(skb, nlh);
		else
			ret = -EOPNOTSUPP;
		mutex_unlock(&sock_diag_table_mutex);

		return ret;
	case SOCK_DIAG_BY_FAMILY:
	case SOCK_DESTROY:
		return __sock_diag_cmd(skb, nlh);
	default:
		return -EINVAL;
	}
}

static DEFINE_MUTEX(sock_diag_mutex);

static void sock_diag_rcv(struct sk_buff *skb)
{
	mutex_lock(&sock_diag_mutex);
	netlink_rcv_skb(skb, &sock_diag_rcv_msg);
	mutex_unlock(&sock_diag_mutex);
}

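/*
 * When a listener joins one of the destroy multicast groups, make sure
 * the matching inet/inet6 diag module is loaded so that destroy
 * notifications can actually be generated.
 */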
static int sock_diag_bind(struct net *net, int group)
{
	switch (group) {
	case SKNLGRP_INET_TCP_DESTROY:
	case SKNLGRP_INET_UDP_DESTROY:
		if (!sock_diag_handlers[AF_INET])
			sock_load_diag_module(AF_INET, 0);
		break;
	case SKNLGRP_INET6_TCP_DESTROY:
	case SKNLGRP_INET6_UDP_DESTROY:
		if (!sock_diag_handlers[AF_INET6])
			sock_load_diag_module(AF_INET6, 0);
		break;
	}
	return 0;
}

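/*
 * Administratively destroy a socket on behalf of a SOCK_DESTROY request.
 * Requires CAP_NET_ADMIN in the socket's network namespace and a
 * protocol that implements ->diag_destroy() (e.g. tcp_abort for TCP).
 */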
int sock_diag_destroy(struct sock *sk, int err)
{
	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!sk->sk_prot->diag_destroy)
		return -EOPNOTSUPP;

	return sk->sk_prot->diag_destroy(sk, err);
}
EXPORT_SYMBOL_GPL(sock_diag_destroy);

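/*
 * Create the per-network-namespace NETLINK_SOCK_DIAG kernel socket.
 * NL_CFG_F_NONROOT_RECV lets unprivileged processes bind to the destroy
 * multicast groups and receive notifications.
 */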
static int __net_init diag_net_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= SKNLGRP_MAX,
		.input	= sock_diag_rcv,
		.bind	= sock_diag_bind,
		.flags	= NL_CFG_F_NONROOT_RECV,
	};

	net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
	return net->diag_nlsk == NULL ? -ENOMEM : 0;
}

static void __net_exit diag_net_exit(struct net *net)
{
	netlink_kernel_release(net->diag_nlsk);
	net->diag_nlsk = NULL;
}

static struct pernet_operations diag_net_ops = {
	.init = diag_net_init,
	.exit = diag_net_exit,
};

static int __init sock_diag_init(void)
{
	broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
	BUG_ON(!broadcast_wq);
	return register_pernet_subsys(&diag_net_ops);
}
device_initcall(sock_diag_init);