af_mctp.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792
// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP)
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/compat.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/mctp.h>
#include <linux/module.h>
#include <linux/socket.h>

#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mctp.h>

/* socket implementation */
  20. static void mctp_sk_expire_keys(struct timer_list *timer);
  21. static int mctp_release(struct socket *sock)
  22. {
  23. struct sock *sk = sock->sk;
  24. if (sk) {
  25. sock->sk = NULL;
  26. sk->sk_prot->close(sk, 0);
  27. }
  28. return 0;
  29. }
  30. /* Generic sockaddr checks, padding checks only so far */
  31. static bool mctp_sockaddr_is_ok(const struct sockaddr_mctp *addr)
  32. {
  33. return !addr->__smctp_pad0 && !addr->__smctp_pad1;
  34. }
  35. static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr)
  36. {
  37. return !addr->__smctp_pad0[0] &&
  38. !addr->__smctp_pad0[1] &&
  39. !addr->__smctp_pad0[2];
  40. }
/* Bind a socket to a (network, EID, message type) tuple, making it
 * eligible to receive incoming MCTP messages of that type.
 *
 * Returns 0 on success, or a negative errno: -EINVAL on a malformed
 * sockaddr, -EAFNOSUPPORT for a non-AF_MCTP address, -EACCES without
 * CAP_NET_BIND_SERVICE, -EADDRINUSE if already bound.
 */
static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sockaddr_mctp *smctp;
	int rc;

	if (addrlen < sizeof(*smctp))
		return -EINVAL;

	if (addr->sa_family != AF_MCTP)
		return -EAFNOSUPPORT;

	if (!capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* it's a valid sockaddr for MCTP, cast and do protocol checks */
	smctp = (struct sockaddr_mctp *)addr;

	if (!mctp_sockaddr_is_ok(smctp))
		return -EINVAL;

	lock_sock(sk);

	/* TODO: allow rebind */
	if (sk_hashed(sk)) {
		rc = -EADDRINUSE;
		goto out_release;
	}
	msk->bind_net = smctp->smctp_network;
	msk->bind_addr = smctp->smctp_addr.s_addr;
	msk->bind_type = smctp->smctp_type & 0x7f; /* ignore the IC bit */

	/* hash() inserts the sock into the net's bind list, making the
	 * bind visible to the receive path
	 */
	rc = sk->sk_prot->hash(sk);

out_release:
	release_sock(sk);

	return rc;
}
/* Queue one MCTP message for transmission.
 *
 * A destination sockaddr_mctp is required in msg_name (connect()ed sends
 * are not yet supported). If the socket has MCTP_OPT_ADDR_EXT set and the
 * caller passed a sockaddr_mctp_ext, the message is addressed directly to
 * the given interface and hardware address, bypassing the routing table
 * (rt stays NULL in that case, and the extended address is carried in the
 * skb cb for the output path).
 *
 * Returns the payload length on success, or a negative errno.
 */
static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	int rc, addrlen = msg->msg_namelen;
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct sk_buff *skb = NULL;
	int hlen;

	if (addr) {
		const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |
			MCTP_TAG_PREALLOC;

		if (addrlen < sizeof(struct sockaddr_mctp))
			return -EINVAL;
		if (addr->smctp_family != AF_MCTP)
			return -EINVAL;
		if (!mctp_sockaddr_is_ok(addr))
			return -EINVAL;
		if (addr->smctp_tag & ~tagbits)
			return -EINVAL;
		/* can't preallocate a non-owned tag */
		if (addr->smctp_tag & MCTP_TAG_PREALLOC &&
		    !(addr->smctp_tag & MCTP_TAG_OWNER))
			return -EINVAL;

	} else {
		/* TODO: connect()ed sockets */
		return -EDESTADDRREQ;
	}

	if (!capable(CAP_NET_RAW))
		return -EACCES;

	if (addr->smctp_network == MCTP_NET_ANY)
		addr->smctp_network = mctp_default_net(sock_net(sk));

	/* direct addressing */
	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);
		struct net_device *dev;

		rc = -EINVAL;
		rcu_read_lock();
		/* the device lookup only sizes the headroom here; the
		 * extended address itself is validated and copied into the
		 * cb below, after the skb is allocated
		 */
		dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
		/* check for correct halen */
		if (dev && extaddr->smctp_halen == dev->addr_len) {
			hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
			rc = 0;
		}
		rcu_read_unlock();
		if (rc)
			goto err_free;
		rt = NULL;
	} else {
		rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
				       addr->smctp_addr.s_addr);
		if (!rt) {
			rc = -EHOSTUNREACH;
			goto err_free;
		}
		hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
	}

	/* +1 for the message-type byte prepended to the payload */
	skb = sock_alloc_send_skb(sk, hlen + 1 + len,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		return rc;

	skb_reserve(skb, hlen);

	/* set type as first byte in payload */
	*(u8 *)skb_put(skb, 1) = addr->smctp_type;

	rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
	if (rc < 0)
		goto err_free;

	/* set up cb */
	cb = __mctp_cb(skb);
	cb->net = addr->smctp_network;

	if (!rt) {
		/* fill extended address in cb */
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);

		if (!mctp_sockaddr_ext_is_ok(extaddr) ||
		    extaddr->smctp_halen > sizeof(cb->haddr)) {
			rc = -EINVAL;
			goto err_free;
		}

		cb->ifindex = extaddr->smctp_ifindex;
		/* smctp_halen is checked above */
		cb->halen = extaddr->smctp_halen;
		memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
	}

	rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
			       addr->smctp_tag);

	return rc ? : len;

err_free:
	kfree_skb(skb);
	return rc;
}
/* Receive one queued MCTP message.
 *
 * The first byte of each queued skb is the MCTP message type, which is
 * stripped from the payload and reported via the source sockaddr's
 * smctp_type instead. If the socket has MCTP_OPT_ADDR_EXT set, an
 * extended sockaddr (including the receiving ifindex and hardware
 * address from the skb cb) is returned in msg_name.
 *
 * Returns the number of payload bytes copied (or the full message length
 * when MSG_TRUNC is set), or a negative errno.
 */
static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sk_buff *skb;
	size_t msglen;
	u8 type;
	int rc;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb)
		return rc;

	/* an empty skb carries no type byte; report a zero-length read */
	if (!skb->len) {
		rc = 0;
		goto out_free;
	}

	/* extract message type, remove from data */
	type = *((u8 *)skb->data);
	msglen = skb->len - 1;

	if (len < msglen)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = msglen;

	/* copy from offset 1, skipping the type byte */
	rc = skb_copy_datagram_msg(skb, 1, msg, len);
	if (rc < 0)
		goto out_free;

	sock_recv_cmsgs(msg, sk, skb);

	if (addr) {
		struct mctp_skb_cb *cb = mctp_cb(skb);
		/* TODO: expand mctp_skb_cb for header fields? */
		struct mctp_hdr *hdr = mctp_hdr(skb);

		addr = msg->msg_name;
		addr->smctp_family = AF_MCTP;
		addr->__smctp_pad0 = 0;
		addr->smctp_network = cb->net;
		addr->smctp_addr.s_addr = hdr->src;
		addr->smctp_type = type;
		addr->smctp_tag = hdr->flags_seq_tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
		addr->__smctp_pad1 = 0;
		msg->msg_namelen = sizeof(*addr);

		if (msk->addr_ext) {
			DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, ae,
					 msg->msg_name);
			msg->msg_namelen = sizeof(*ae);
			ae->smctp_ifindex = cb->ifindex;
			ae->smctp_halen = cb->halen;
			/* zero pad and haddr so no kernel memory leaks to
			 * userspace past the copied hardware address
			 */
			memset(ae->__smctp_pad0, 0x0, sizeof(ae->__smctp_pad0));
			memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr));
			memcpy(ae->smctp_haddr, cb->haddr, cb->halen);
		}
	}

	rc = len;

	if (flags & MSG_TRUNC)
		rc = msglen;

out_free:
	skb_free_datagram(sk, skb);
	return rc;
}
/* We're done with the key; invalidate, stop reassembly, and remove from lists.
 *
 * Called with net->mctp.keys_lock held and key->lock held (irqs saved in
 * @flags); key->lock is released here, keys_lock remains held by the
 * caller. Drops the list-held reference if the key was still hashed.
 */
static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net,
			      unsigned long flags, unsigned long reason)
	__releases(&key->lock)
	__must_hold(&net->mctp.keys_lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	skb = key->reasm_head;
	key->reasm_head = NULL;
	key->reasm_dead = true;
	key->valid = false;
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	if (!hlist_unhashed(&key->hlist)) {
		hlist_del_init(&key->hlist);
		hlist_del_init(&key->sklist);
		/* unref for the lists */
		mctp_key_unref(key);
	}

	/* free any partial reassembly outside the key lock */
	kfree_skb(skb);
}
  249. static int mctp_setsockopt(struct socket *sock, int level, int optname,
  250. sockptr_t optval, unsigned int optlen)
  251. {
  252. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  253. int val;
  254. if (level != SOL_MCTP)
  255. return -EINVAL;
  256. if (optname == MCTP_OPT_ADDR_EXT) {
  257. if (optlen != sizeof(int))
  258. return -EINVAL;
  259. if (copy_from_sockptr(&val, optval, sizeof(int)))
  260. return -EFAULT;
  261. msk->addr_ext = val;
  262. return 0;
  263. }
  264. return -ENOPROTOOPT;
  265. }
  266. static int mctp_getsockopt(struct socket *sock, int level, int optname,
  267. char __user *optval, int __user *optlen)
  268. {
  269. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  270. int len, val;
  271. if (level != SOL_MCTP)
  272. return -EINVAL;
  273. if (get_user(len, optlen))
  274. return -EFAULT;
  275. if (optname == MCTP_OPT_ADDR_EXT) {
  276. if (len != sizeof(int))
  277. return -EINVAL;
  278. val = !!msk->addr_ext;
  279. if (copy_to_user(optval, &val, len))
  280. return -EFAULT;
  281. return 0;
  282. }
  283. return -EINVAL;
  284. }
  285. /* helpers for reading/writing the tag ioc, handling compatibility across the
  286. * two versions, and some basic API error checking
  287. */
  288. static int mctp_ioctl_tag_copy_from_user(unsigned long arg,
  289. struct mctp_ioc_tag_ctl2 *ctl,
  290. bool tagv2)
  291. {
  292. struct mctp_ioc_tag_ctl ctl_compat;
  293. unsigned long size;
  294. void *ptr;
  295. int rc;
  296. if (tagv2) {
  297. size = sizeof(*ctl);
  298. ptr = ctl;
  299. } else {
  300. size = sizeof(ctl_compat);
  301. ptr = &ctl_compat;
  302. }
  303. rc = copy_from_user(ptr, (void __user *)arg, size);
  304. if (rc)
  305. return -EFAULT;
  306. if (!tagv2) {
  307. /* compat, using defaults for new fields */
  308. ctl->net = MCTP_INITIAL_DEFAULT_NET;
  309. ctl->peer_addr = ctl_compat.peer_addr;
  310. ctl->local_addr = MCTP_ADDR_ANY;
  311. ctl->flags = ctl_compat.flags;
  312. ctl->tag = ctl_compat.tag;
  313. }
  314. if (ctl->flags)
  315. return -EINVAL;
  316. if (ctl->local_addr != MCTP_ADDR_ANY &&
  317. ctl->local_addr != MCTP_ADDR_NULL)
  318. return -EINVAL;
  319. return 0;
  320. }
  321. static int mctp_ioctl_tag_copy_to_user(unsigned long arg,
  322. struct mctp_ioc_tag_ctl2 *ctl,
  323. bool tagv2)
  324. {
  325. struct mctp_ioc_tag_ctl ctl_compat;
  326. unsigned long size;
  327. void *ptr;
  328. int rc;
  329. if (tagv2) {
  330. ptr = ctl;
  331. size = sizeof(*ctl);
  332. } else {
  333. ctl_compat.peer_addr = ctl->peer_addr;
  334. ctl_compat.tag = ctl->tag;
  335. ctl_compat.flags = ctl->flags;
  336. ptr = &ctl_compat;
  337. size = sizeof(ctl_compat);
  338. }
  339. rc = copy_to_user((void __user *)arg, ptr, size);
  340. if (rc)
  341. return -EFAULT;
  342. return 0;
  343. }
/* SIOCMCTPALLOCTAG{,2}: preallocate an owned tag for (net, peer), and
 * return the tag value (with OWNER|PREALLOC set) to userspace. The key is
 * held by the socket until dropped or the socket closes.
 */
static int mctp_ioctl_alloctag(struct mctp_sock *msk, bool tagv2,
			       unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *key = NULL;
	struct mctp_ioc_tag_ctl2 ctl;
	unsigned long flags;
	u8 tag;
	int rc;

	rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
	if (rc)
		return rc;

	/* the tag field is output-only for alloc */
	if (ctl.tag)
		return -EINVAL;

	key = mctp_alloc_local_tag(msk, ctl.net, MCTP_ADDR_ANY,
				   ctl.peer_addr, true, &tag);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
	rc = mctp_ioctl_tag_copy_to_user(arg, &ctl, tagv2);
	if (rc) {
		unsigned long fl2;
		/* Unwind our key allocation: the keys list lock needs to be
		 * taken before the individual key locks, and we need a valid
		 * flags value (fl2) to pass to __mctp_key_remove, hence the
		 * second spin_lock_irqsave() rather than a plain spin_lock().
		 */
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		spin_lock_irqsave(&key->lock, fl2);
		/* releases key->lock */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
		mctp_key_unref(key);
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
		return rc;
	}

	/* drop the ref from mctp_alloc_local_tag; the lists keep theirs */
	mctp_key_unref(key);
	return 0;
}
/* SIOCMCTPDROPTAG{,2}: release a previously preallocated tag. Matches a
 * manually-allocated key on (net, peer, tag); returns -EINVAL if the tag
 * value is not an owned, preallocated tag, or no matching key is found.
 */
static int mctp_ioctl_droptag(struct mctp_sock *msk, bool tagv2,
			      unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_ioc_tag_ctl2 ctl;
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	int rc;
	u8 tag;

	rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
	if (rc)
		return rc;

	/* Must be a local tag, TO set, preallocated */
	if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
		return -EINVAL;

	tag = ctl.tag & MCTP_TAG_MASK;
	rc = -EINVAL;

	/* NULL and ANY peer addresses are treated identically for drop */
	if (ctl.peer_addr == MCTP_ADDR_NULL)
		ctl.peer_addr = MCTP_ADDR_ANY;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* we do an irqsave here, even though we know the irq state,
		 * so we have the flags to pass to __mctp_key_remove
		 */
		spin_lock_irqsave(&key->lock, fl2);
		if (key->manual_alloc &&
		    ctl.net == key->net &&
		    ctl.peer_addr == key->peer_addr &&
		    tag == key->tag) {
			/* releases key->lock */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_DROPPED);
			rc = 0;
		} else {
			spin_unlock_irqrestore(&key->lock, fl2);
		}
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}
  421. static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  422. {
  423. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  424. bool tagv2 = false;
  425. switch (cmd) {
  426. case SIOCMCTPALLOCTAG2:
  427. case SIOCMCTPALLOCTAG:
  428. tagv2 = cmd == SIOCMCTPALLOCTAG2;
  429. return mctp_ioctl_alloctag(msk, tagv2, arg);
  430. case SIOCMCTPDROPTAG:
  431. case SIOCMCTPDROPTAG2:
  432. tagv2 = cmd == SIOCMCTPDROPTAG2;
  433. return mctp_ioctl_droptag(msk, tagv2, arg);
  434. }
  435. return -EINVAL;
  436. }
  437. #ifdef CONFIG_COMPAT
  438. static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd,
  439. unsigned long arg)
  440. {
  441. void __user *argp = compat_ptr(arg);
  442. switch (cmd) {
  443. /* These have compatible ptr layouts */
  444. case SIOCMCTPALLOCTAG:
  445. case SIOCMCTPDROPTAG:
  446. return mctp_ioctl(sock, cmd, (unsigned long)argp);
  447. }
  448. return -ENOIOCTLCMD;
  449. }
  450. #endif
/* proto_ops for AF_MCTP datagram sockets; connection-oriented operations
 * are stubbed with the sock_no_* helpers.
 */
static const struct proto_ops mctp_dgram_ops = {
	.family		= PF_MCTP,
	.release	= mctp_release,
	.bind		= mctp_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= datagram_poll,
	.ioctl		= mctp_ioctl,
	.gettstamp	= sock_gettstamp,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= mctp_setsockopt,
	.getsockopt	= mctp_getsockopt,
	.sendmsg	= mctp_sendmsg,
	.recvmsg	= mctp_recvmsg,
	.mmap		= sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mctp_compat_ioctl,
#endif
};
/* Timer callback: drop any of this socket's keys whose expiry time has
 * passed, then re-arm the timer for the soonest remaining expiry (if any).
 * Manually-allocated keys never expire here; they are only released by
 * SIOCMCTPDROPTAG or socket close.
 */
static void mctp_sk_expire_keys(struct timer_list *timer)
{
	struct mctp_sock *msk = container_of(timer, struct mctp_sock,
					     key_expiry);
	struct net *net = sock_net(&msk->sk);
	unsigned long next_expiry, flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	bool next_expiry_valid = false;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* don't expire. manual_alloc is immutable, no locking
		 * required.
		 */
		if (key->manual_alloc)
			continue;

		spin_lock_irqsave(&key->lock, fl2);
		if (!time_after_eq(key->expiry, jiffies)) {
			/* expired; __mctp_key_remove releases key->lock */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_TIMEOUT);
			continue;
		}

		/* still live: track the earliest upcoming expiry */
		if (next_expiry_valid) {
			if (time_before(key->expiry, next_expiry))
				next_expiry = key->expiry;
		} else {
			next_expiry = key->expiry;
			next_expiry_valid = true;
		}
		spin_unlock_irqrestore(&key->lock, fl2);
	}

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	if (next_expiry_valid)
		mod_timer(timer, next_expiry);
}
/* Per-socket init: empty key list, and the key-expiry timer (armed later,
 * when keys are added).
 */
static int mctp_sk_init(struct sock *sk)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

	INIT_HLIST_HEAD(&msk->keys);
	timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
	return 0;
}
/* proto close handler: generic sock teardown (unhash via sk_prot->unhash,
 * orphan, and final put are all handled by sk_common_release()).
 */
static void mctp_sk_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}
/* Insert the socket into the per-net bind list, making it visible to the
 * receive path; serialized against unhash by net->mctp.bind_lock.
 */
static int mctp_sk_hash(struct sock *sk)
{
	struct net *net = sock_net(sk);

	mutex_lock(&net->mctp.bind_lock);
	sk_add_node_rcu(sk, &net->mctp.binds);
	mutex_unlock(&net->mctp.bind_lock);

	return 0;
}
/* Tear down a socket's receive state: remove it from the bind list, drop
 * all of its tag keys, and stop the key-expiry timer.
 */
static void mctp_sk_unhash(struct sock *sk)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct net *net = sock_net(sk);
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;

	/* remove from any type-based binds */
	mutex_lock(&net->mctp.bind_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->mctp.bind_lock);

	/* remove tag allocations */
	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		spin_lock_irqsave(&key->lock, fl2);
		/* releases key->lock */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
	}
	/* mark dead under keys_lock, so no new keys can be attached */
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	/* Since there are no more tag allocations (we have removed all of the
	 * keys), stop any pending expiry events. the timer cannot be re-queued
	 * as the sk is no longer observable
	 */
	del_timer_sync(&msk->key_expiry);
}
/* Final sk destructor: free any skbs still queued for receive. */
static void mctp_sk_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
/* MCTP protocol definition; obj_size embeds struct sock within
 * struct mctp_sock.
 */
static struct proto mctp_proto = {
	.name		= "MCTP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct mctp_sock),
	.init		= mctp_sk_init,
	.close		= mctp_sk_close,
	.hash		= mctp_sk_hash,
	.unhash		= mctp_sk_unhash,
};
  565. static int mctp_pf_create(struct net *net, struct socket *sock,
  566. int protocol, int kern)
  567. {
  568. const struct proto_ops *ops;
  569. struct proto *proto;
  570. struct sock *sk;
  571. int rc;
  572. if (protocol)
  573. return -EPROTONOSUPPORT;
  574. /* only datagram sockets are supported */
  575. if (sock->type != SOCK_DGRAM)
  576. return -ESOCKTNOSUPPORT;
  577. proto = &mctp_proto;
  578. ops = &mctp_dgram_ops;
  579. sock->state = SS_UNCONNECTED;
  580. sock->ops = ops;
  581. sk = sk_alloc(net, PF_MCTP, GFP_KERNEL, proto, kern);
  582. if (!sk)
  583. return -ENOMEM;
  584. sock_init_data(sock, sk);
  585. sk->sk_destruct = mctp_sk_destruct;
  586. rc = 0;
  587. if (sk->sk_prot->init)
  588. rc = sk->sk_prot->init(sk);
  589. if (rc)
  590. goto err_sk_put;
  591. return 0;
  592. err_sk_put:
  593. sock_orphan(sk);
  594. sock_put(sk);
  595. return rc;
  596. }
/* AF_MCTP address family registration. */
static struct net_proto_family mctp_pf = {
	.family = PF_MCTP,
	.create = mctp_pf_create,
	.owner = THIS_MODULE,
};
/* Module init: register the socket family and protocol, then bring up the
 * routing, neighbour and device subsystems; unwinds in reverse order on
 * any failure.
 */
static __init int mctp_init(void)
{
	int rc;

	/* ensure our uapi tag definitions match the header format */
	BUILD_BUG_ON(MCTP_TAG_OWNER != MCTP_HDR_FLAG_TO);
	BUILD_BUG_ON(MCTP_TAG_MASK != MCTP_HDR_TAG_MASK);

	pr_info("mctp: management component transport protocol core\n");

	rc = sock_register(&mctp_pf);
	if (rc)
		return rc;

	rc = proto_register(&mctp_proto, 0);
	if (rc)
		goto err_unreg_sock;

	rc = mctp_routes_init();
	if (rc)
		goto err_unreg_proto;

	rc = mctp_neigh_init();
	if (rc)
		goto err_unreg_routes;

	rc = mctp_device_init();
	if (rc)
		goto err_unreg_neigh;

	return 0;

err_unreg_neigh:
	mctp_neigh_exit();
err_unreg_routes:
	mctp_routes_exit();
err_unreg_proto:
	proto_unregister(&mctp_proto);
err_unreg_sock:
	sock_unregister(PF_MCTP);

	return rc;
}
/* Module exit: tear down subsystems in the reverse of init order. */
static __exit void mctp_exit(void)
{
	mctp_device_exit();
	mctp_neigh_exit();
	mctp_routes_exit();
	proto_unregister(&mctp_proto);
	sock_unregister(PF_MCTP);
}
/* NOTE(review): registered at subsys_initcall time (rather than
 * module_init) — presumably so the MCTP core is available before device
 * drivers probe; confirm against the original commit rationale.
 */
subsys_initcall(mctp_init);
module_exit(mctp_exit);

MODULE_DESCRIPTION("MCTP core");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
MODULE_ALIAS_NETPROTO(PF_MCTP);