device.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - device implementation.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

#include <net/addrconf.h>
#include <net/netlink.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/sock.h>

struct mctp_dump_cb {
        unsigned long ifindex;
        size_t a_idx;
};

/* unlocked: caller must hold rcu_read_lock.
 * Returned mctp_dev has its refcount incremented, or NULL if unset.
 */
struct mctp_dev *__mctp_dev_get(const struct net_device *dev)
{
        struct mctp_dev *mdev = rcu_dereference(dev->mctp_ptr);

        /* RCU guarantees that any mdev is still live.
         * Zero refcount implies a pending free, return NULL.
         */
        if (mdev)
                if (!refcount_inc_not_zero(&mdev->refs))
                        return NULL;
        return mdev;
}
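/* Typical caller pattern (an illustrative sketch only, mirroring the use in
 * mctp_dump_addrinfo() below): look the mctp_dev up under the RCU read lock
 * and balance the acquired reference with mctp_dev_put() when done.
 *
 *      rcu_read_lock();
 *      mdev = __mctp_dev_get(dev);
 *      if (mdev) {
 *              ... use mdev ...
 *              mctp_dev_put(mdev);
 *      }
 *      rcu_read_unlock();
 */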
/* Returned mctp_dev does not have refcount incremented. The returned pointer
 * remains live while rtnl_lock is held, as that prevents mctp_unregister()
 */
struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
{
        return rtnl_dereference(dev->mctp_ptr);
}

static int mctp_addrinfo_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
                + nla_total_size(1) // IFA_LOCAL
                + nla_total_size(1) // IFA_ADDRESS
                ;
}

/* flag should be NLM_F_MULTI for dump calls */
static int mctp_fill_addrinfo(struct sk_buff *skb,
                              struct mctp_dev *mdev, mctp_eid_t eid,
                              int msg_type, u32 portid, u32 seq, int flag)
{
        struct ifaddrmsg *hdr;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, portid, seq,
                        msg_type, sizeof(*hdr), flag);
        if (!nlh)
                return -EMSGSIZE;

        hdr = nlmsg_data(nlh);
        hdr->ifa_family = AF_MCTP;
        hdr->ifa_prefixlen = 0;
        hdr->ifa_flags = 0;
        hdr->ifa_scope = 0;
        hdr->ifa_index = mdev->dev->ifindex;

        if (nla_put_u8(skb, IFA_LOCAL, eid))
                goto cancel;

        if (nla_put_u8(skb, IFA_ADDRESS, eid))
                goto cancel;

        nlmsg_end(skb, nlh);

        return 0;

cancel:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb,
                                  struct netlink_callback *cb)
{
        struct mctp_dump_cb *mcb = (void *)cb->ctx;
        u32 portid, seq;
        int rc = 0;

        portid = NETLINK_CB(cb->skb).portid;
        seq = cb->nlh->nlmsg_seq;
        for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) {
                rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx],
                                        RTM_NEWADDR, portid, seq, NLM_F_MULTI);
                if (rc < 0)
                        break;
        }

        return rc;
}
static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct mctp_dump_cb *mcb = (void *)cb->ctx;
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;
        struct ifaddrmsg *hdr;
        struct mctp_dev *mdev;
        int ifindex = 0, rc;

        /* Filter by ifindex if a header is provided */
        if (cb->nlh->nlmsg_len >= nlmsg_msg_size(sizeof(*hdr))) {
                hdr = nlmsg_data(cb->nlh);
                ifindex = hdr->ifa_index;
        } else {
                if (cb->strict_check) {
                        NL_SET_ERR_MSG(cb->extack, "mctp: Invalid header for addr dump request");
                        return -EINVAL;
                }
        }

        rcu_read_lock();
        for_each_netdev_dump(net, dev, mcb->ifindex) {
                if (ifindex && ifindex != dev->ifindex)
                        continue;
                mdev = __mctp_dev_get(dev);
                if (!mdev)
                        continue;
                rc = mctp_dump_dev_addrinfo(mdev, skb, cb);
                mctp_dev_put(mdev);
                if (rc < 0)
                        break;
                mcb->a_idx = 0;
        }
        rcu_read_unlock();

        return skb->len;
}
static void mctp_addr_notify(struct mctp_dev *mdev, mctp_eid_t eid, int msg_type,
                             struct sk_buff *req_skb, struct nlmsghdr *req_nlh)
{
        u32 portid = NETLINK_CB(req_skb).portid;
        struct net *net = dev_net(mdev->dev);
        struct sk_buff *skb;
        int rc = -ENOBUFS;

        skb = nlmsg_new(mctp_addrinfo_size(), GFP_KERNEL);
        if (!skb)
                goto out;

        rc = mctp_fill_addrinfo(skb, mdev, eid, msg_type,
                                portid, req_nlh->nlmsg_seq, 0);
        if (rc < 0) {
                WARN_ON_ONCE(rc == -EMSGSIZE);
                goto out;
        }

        rtnl_notify(skb, net, portid, RTNLGRP_MCTP_IFADDR, req_nlh, GFP_KERNEL);
        return;
out:
        kfree_skb(skb);
        rtnl_set_sk_err(net, RTNLGRP_MCTP_IFADDR, rc);
}

static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = {
        [IFA_ADDRESS] = { .type = NLA_U8 },
        [IFA_LOCAL] = { .type = NLA_U8 },
};
static int mctp_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[IFA_MAX + 1];
        struct net_device *dev;
        struct mctp_addr *addr;
        struct mctp_dev *mdev;
        struct ifaddrmsg *ifm;
        unsigned long flags;
        u8 *tmp_addrs;
        int rc;

        rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
                         extack);
        if (rc < 0)
                return rc;

        ifm = nlmsg_data(nlh);

        if (tb[IFA_LOCAL])
                addr = nla_data(tb[IFA_LOCAL]);
        else if (tb[IFA_ADDRESS])
                addr = nla_data(tb[IFA_ADDRESS]);
        else
                return -EINVAL;

        /* find device */
        dev = __dev_get_by_index(net, ifm->ifa_index);
        if (!dev)
                return -ENODEV;

        mdev = mctp_dev_get_rtnl(dev);
        if (!mdev)
                return -ENODEV;

        if (!mctp_address_unicast(addr->s_addr))
                return -EINVAL;

        /* Prevent duplicates. Under RTNL so don't need to lock for reading */
        if (memchr(mdev->addrs, addr->s_addr, mdev->num_addrs))
                return -EEXIST;

        tmp_addrs = kmalloc(mdev->num_addrs + 1, GFP_KERNEL);
        if (!tmp_addrs)
                return -ENOMEM;

        memcpy(tmp_addrs, mdev->addrs, mdev->num_addrs);
        tmp_addrs[mdev->num_addrs] = addr->s_addr;

        /* Lock to write */
        spin_lock_irqsave(&mdev->addrs_lock, flags);
        mdev->num_addrs++;
        swap(mdev->addrs, tmp_addrs);
        spin_unlock_irqrestore(&mdev->addrs_lock, flags);

        kfree(tmp_addrs);

        mctp_addr_notify(mdev, addr->s_addr, RTM_NEWADDR, skb, nlh);
        mctp_route_add_local(mdev, addr->s_addr);

        return 0;
}
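/* Note on the address update in mctp_rtm_newaddr() above (descriptive, based
 * on the code rather than anything stated in this file): the replacement
 * array is allocated and populated with GFP_KERNEL before addrs_lock is
 * taken; only the swap() of the array pointer and the num_addrs increment
 * happen under the spinlock, and the superseded array is freed after the
 * lock is dropped. This keeps the allocation out of the critical section.
 */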
static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[IFA_MAX + 1];
        struct net_device *dev;
        struct mctp_addr *addr;
        struct mctp_dev *mdev;
        struct ifaddrmsg *ifm;
        unsigned long flags;
        u8 *pos;
        int rc;

        rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
                         extack);
        if (rc < 0)
                return rc;

        ifm = nlmsg_data(nlh);

        if (tb[IFA_LOCAL])
                addr = nla_data(tb[IFA_LOCAL]);
        else if (tb[IFA_ADDRESS])
                addr = nla_data(tb[IFA_ADDRESS]);
        else
                return -EINVAL;

        /* find device */
        dev = __dev_get_by_index(net, ifm->ifa_index);
        if (!dev)
                return -ENODEV;

        mdev = mctp_dev_get_rtnl(dev);
        if (!mdev)
                return -ENODEV;

        pos = memchr(mdev->addrs, addr->s_addr, mdev->num_addrs);
        if (!pos)
                return -ENOENT;

        rc = mctp_route_remove_local(mdev, addr->s_addr);
        // we can ignore -ENOENT in the case a route was already removed
        if (rc < 0 && rc != -ENOENT)
                return rc;

        spin_lock_irqsave(&mdev->addrs_lock, flags);
        memmove(pos, pos + 1, mdev->num_addrs - 1 - (pos - mdev->addrs));
        mdev->num_addrs--;
        spin_unlock_irqrestore(&mdev->addrs_lock, flags);

        mctp_addr_notify(mdev, addr->s_addr, RTM_DELADDR, skb, nlh);

        return 0;
}
void mctp_dev_hold(struct mctp_dev *mdev)
{
        refcount_inc(&mdev->refs);
}

void mctp_dev_put(struct mctp_dev *mdev)
{
        if (mdev && refcount_dec_and_test(&mdev->refs)) {
                kfree(mdev->addrs);
                dev_put(mdev->dev);
                kfree_rcu(mdev, rcu);
        }
}
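/* Lifecycle note (descriptive, based on the code above and mctp_add_dev()
 * below): each mctp_dev pins its net_device via dev_hold(), and the final
 * mctp_dev_put() drops that reference and frees the structure via
 * kfree_rcu(), so lockless readers using rcu_dereference() in
 * __mctp_dev_get() never observe freed memory.
 */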
void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key)
        __must_hold(&key->lock)
{
        if (!dev)
                return;
        if (dev->ops && dev->ops->release_flow)
                dev->ops->release_flow(dev, key);
        key->dev = NULL;
        mctp_dev_put(dev);
}

void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key)
        __must_hold(&key->lock)
{
        mctp_dev_hold(dev);
        key->dev = dev;
}

static struct mctp_dev *mctp_add_dev(struct net_device *dev)
{
        struct mctp_dev *mdev;

        ASSERT_RTNL();

        mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mdev->addrs_lock);

        mdev->net = mctp_default_net(dev_net(dev));

        /* associate to net_device */
        refcount_set(&mdev->refs, 1);
        rcu_assign_pointer(dev->mctp_ptr, mdev);

        dev_hold(dev);
        mdev->dev = dev;

        return mdev;
}
static int mctp_fill_link_af(struct sk_buff *skb,
                             const struct net_device *dev, u32 ext_filter_mask)
{
        struct mctp_dev *mdev;

        mdev = mctp_dev_get_rtnl(dev);
        if (!mdev)
                return -ENODATA;
        if (nla_put_u32(skb, IFLA_MCTP_NET, mdev->net))
                return -EMSGSIZE;
        return 0;
}

static size_t mctp_get_link_af_size(const struct net_device *dev,
                                    u32 ext_filter_mask)
{
        struct mctp_dev *mdev;
        unsigned int ret;

        /* caller holds RCU */
        mdev = __mctp_dev_get(dev);
        if (!mdev)
                return 0;
        ret = nla_total_size(4); /* IFLA_MCTP_NET */
        mctp_dev_put(mdev);
        return ret;
}

static const struct nla_policy ifla_af_mctp_policy[IFLA_MCTP_MAX + 1] = {
        [IFLA_MCTP_NET] = { .type = NLA_U32 },
};

static int mctp_set_link_af(struct net_device *dev, const struct nlattr *attr,
                            struct netlink_ext_ack *extack)
{
        struct nlattr *tb[IFLA_MCTP_MAX + 1];
        struct mctp_dev *mdev;
        int rc;

        rc = nla_parse_nested(tb, IFLA_MCTP_MAX, attr, ifla_af_mctp_policy,
                              NULL);
        if (rc)
                return rc;

        mdev = mctp_dev_get_rtnl(dev);
        if (!mdev)
                return 0;

        if (tb[IFLA_MCTP_NET])
                WRITE_ONCE(mdev->net, nla_get_u32(tb[IFLA_MCTP_NET]));

        return 0;
}

/* Matches netdev types that should have MCTP handling */
static bool mctp_known(struct net_device *dev)
{
        /* only register specific types (inc. NONE for TUN devices) */
        return dev->type == ARPHRD_MCTP ||
               dev->type == ARPHRD_LOOPBACK ||
               dev->type == ARPHRD_NONE;
}
static void mctp_unregister(struct net_device *dev)
{
        struct mctp_dev *mdev;

        mdev = mctp_dev_get_rtnl(dev);
        if (!mdev)
                return;

        RCU_INIT_POINTER(mdev->dev->mctp_ptr, NULL);

        mctp_route_remove_dev(mdev);
        mctp_neigh_remove_dev(mdev);

        mctp_dev_put(mdev);
}

static int mctp_register(struct net_device *dev)
{
        struct mctp_dev *mdev;

        /* Already registered? */
        if (rtnl_dereference(dev->mctp_ptr))
                return 0;

        /* only register specific types */
        if (!mctp_known(dev))
                return 0;

        mdev = mctp_add_dev(dev);
        if (IS_ERR(mdev))
                return PTR_ERR(mdev);

        return 0;
}

static int mctp_dev_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int rc;

        switch (event) {
        case NETDEV_REGISTER:
                rc = mctp_register(dev);
                if (rc)
                        return notifier_from_errno(rc);
                break;
        case NETDEV_UNREGISTER:
                mctp_unregister(dev);
                break;
        }

        return NOTIFY_OK;
}

static int mctp_register_netdevice(struct net_device *dev,
                                   const struct mctp_netdev_ops *ops)
{
        struct mctp_dev *mdev;

        mdev = mctp_add_dev(dev);
        if (IS_ERR(mdev))
                return PTR_ERR(mdev);

        mdev->ops = ops;

        return register_netdevice(dev);
}

int mctp_register_netdev(struct net_device *dev,
                         const struct mctp_netdev_ops *ops)
{
        int rc;

        rtnl_lock();
        rc = mctp_register_netdevice(dev, ops);
        rtnl_unlock();

        return rc;
}
EXPORT_SYMBOL_GPL(mctp_register_netdev);
void mctp_unregister_netdev(struct net_device *dev)
{
        unregister_netdev(dev);
}
EXPORT_SYMBOL_GPL(mctp_unregister_netdev);
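/* Illustrative sketch (not part of this file): a hypothetical MCTP transport
 * driver might pair the two exported helpers as follows; "foo_netdev",
 * "foo_release_flow" and the error label are driver-side assumptions.
 *
 *      static const struct mctp_netdev_ops foo_mctp_ops = {
 *              .release_flow = foo_release_flow,
 *      };
 *
 *      rc = mctp_register_netdev(foo_netdev, &foo_mctp_ops);
 *      if (rc)
 *              goto err_free_netdev;
 *      ...
 *      mctp_unregister_netdev(foo_netdev);
 */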
static struct rtnl_af_ops mctp_af_ops = {
        .family = AF_MCTP,
        .fill_link_af = mctp_fill_link_af,
        .get_link_af_size = mctp_get_link_af_size,
        .set_link_af = mctp_set_link_af,
};

static struct notifier_block mctp_dev_nb = {
        .notifier_call = mctp_dev_notify,
        .priority = ADDRCONF_NOTIFY_PRIORITY,
};

static const struct rtnl_msg_handler mctp_device_rtnl_msg_handlers[] = {
        {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_NEWADDR,
         .doit = mctp_rtm_newaddr},
        {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_DELADDR,
         .doit = mctp_rtm_deladdr},
        {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_GETADDR,
         .dumpit = mctp_dump_addrinfo},
};

int __init mctp_device_init(void)
{
        int err;

        register_netdevice_notifier(&mctp_dev_nb);
        rtnl_af_register(&mctp_af_ops);

        err = rtnl_register_many(mctp_device_rtnl_msg_handlers);
        if (err) {
                rtnl_af_unregister(&mctp_af_ops);
                unregister_netdevice_notifier(&mctp_dev_nb);
        }

        return err;
}

void __exit mctp_device_exit(void)
{
        rtnl_unregister_many(mctp_device_rtnl_msg_handlers);
        rtnl_af_unregister(&mctp_af_ops);
        unregister_netdevice_notifier(&mctp_dev_nb);
}