// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Bootlin
 *
 */

#include "common.h"
#include "netlink.h"

#include <linux/phy.h>
#include <linux/phy_link_topology.h>
#include <linux/sfp.h>
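
/* Per-request state: the common ethnl request info plus the topology node
 * of the PHY being queried.
 */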
struct phy_req_info {
	struct ethnl_req_info		base;
	struct phy_device_node		*pdn;
};

#define PHY_REQINFO(__req_base) \
	container_of(__req_base, struct phy_req_info, base)

const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
	[ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
};

/* Caller holds rtnl */
static ssize_t
ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
		     struct netlink_ext_ack *extack)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	size_t size = 0;

	ASSERT_RTNL();

	/* ETHTOOL_A_PHY_INDEX */
	size += nla_total_size(sizeof(u32));

	/* ETHTOOL_A_PHY_DRVNAME */
	if (phydev->drv)
		size += nla_total_size(strlen(phydev->drv->name) + 1);

	/* ETHTOOL_A_PHY_NAME */
	size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);

	/* ETHTOOL_A_PHY_UPSTREAM_TYPE */
	size += nla_total_size(sizeof(u32));

	if (phy_on_sfp(phydev)) {
		const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);

		/* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
		if (upstream_sfp_name)
			size += nla_total_size(strlen(upstream_sfp_name) + 1);

		/* ETHTOOL_A_PHY_UPSTREAM_INDEX */
		size += nla_total_size(sizeof(u32));
	}

	/* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name)
			size += nla_total_size(strlen(sfp_name) + 1);
	}

	return size;
}
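
/* Fill one PHY_GET reply for the PHY behind req_info->pdn. Caller holds
 * rtnl, which keeps the phy_device_node and its upstream pointers valid.
 */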
static int
ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	enum phy_upstream ptype;

	ptype = pdn->upstream_type;

	if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
	    nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
	    nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
		return -EMSGSIZE;

	if (phydev->drv &&
	    nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
		return -EMSGSIZE;

	if (ptype == PHY_UPSTREAM_PHY) {
		struct phy_device *upstream = pdn->upstream.phydev;
		const char *sfp_upstream_name;

		/* Parent index */
		if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
			return -EMSGSIZE;

		if (pdn->parent_sfp_bus) {
			sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
			if (sfp_upstream_name &&
			    nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
					   sfp_upstream_name))
				return -EMSGSIZE;
		}
	}

	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name &&
		    nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
				   sfp_name))
			return -EMSGSIZE;
	}

	return 0;
}
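
/* Resolve the PHY targeted by the request header into a phy_device_node.
 * req_info->pdn is left NULL when there is no PHY or no link topology;
 * the caller treats that as "nothing to report". Caller holds rtnl.
 */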
static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
				   struct nlattr **tb,
				   struct netlink_ext_ack *extack)
{
	struct phy_link_topology *topo = req_base->dev->link_topo;
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device *phydev;

	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER],
				      extack);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (!topo)
		return 0;

	req_info->pdn = xa_load(&topo->phys, phydev->phyindex);

	return 0;
}
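
/* Handle an ETHTOOL_MSG_PHY_GET request for a single PHY */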
int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct phy_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info.base,
					 tb[ETHTOOL_A_PHY_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();

	ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;

	/* No PHY, return early */
	if (!req_info.pdn)
		goto err_unlock_rtnl;

	ret = ethnl_phy_reply_size(&req_info.base, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.base.dev,
				ETHTOOL_MSG_PHY_GET_REPLY,
				ETHTOOL_A_PHY_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_phy_fill_reply(&req_info.base, rskb);
	if (ret)
		goto err_free_msg;

	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	return ret;
}
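
/* Dump state kept in cb->ctx: the pre-parsed request plus iteration
 * cursors over net devices (ifindex) and each device's PHY xarray
 * (phy_index).
 */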
struct ethnl_phy_dump_ctx {
	struct phy_req_info	*phy_req_info;
	unsigned long		ifindex;
	unsigned long		phy_index;
};
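
/* Allocate and parse the request info once, before the first dump pass */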
int ethnl_phy_start(struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
	if (!ctx->phy_req_info)
		return -ENOMEM;

	ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
					 info->attrs[ETHTOOL_A_PHY_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	ctx->ifindex = 0;
	ctx->phy_index = 0;

	if (ret)
		kfree(ctx->phy_req_info);

	return ret;
}
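
/* Drop the device reference and free the request info when the dump ends */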
int ethnl_phy_done(struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;

	if (ctx->phy_req_info->base.dev)
		ethnl_parse_header_dev_put(&ctx->phy_req_info->base);
	kfree(ctx->phy_req_info);

	return 0;
}
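
/* Emit one reply per PHY on @dev, resuming from ctx->phy_index so that a
 * dump interrupted by a full skb continues where it left off.
 * Caller holds rtnl.
 */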
static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
				  struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct phy_req_info *pri = ctx->phy_req_info;
	struct phy_device_node *pdn;
	int ret = 0;
	void *ehdr;

	if (!dev->link_topo)
		return 0;

	xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
		ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		pri->pdn = pdn;
		ret = ethnl_phy_fill_reply(&pri->base, skb);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		genlmsg_end(skb, ehdr);
	}

	return ret;
}
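
/* Dump callback: walk only the device named in the request header if any,
 * otherwise every net device in the namespace.
 */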
int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();

	if (ctx->phy_req_info->base.dev) {
		ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
	} else {
		for_each_netdev_dump(net, dev, ctx->ifindex) {
			ret = ethnl_phy_dump_one_dev(skb, dev, cb);
			if (ret)
				break;
			ctx->phy_index = 0;
		}
	}
	rtnl_unlock();

	return ret;
}