// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2022-2023 NXP
 */
#include "common.h"
#include "netlink.h"
/* Request info for ETHTOOL_MSG_MM_GET; no private fields beyond the
 * common ethnl base.
 */
struct mm_req_info {
	struct ethnl_req_info base;
};
/* Per-request reply data: MAC merge state and (optionally, when
 * ETHTOOL_FLAG_STATS is set) statistics, both collected from the driver
 * in mm_prepare_data().
 */
struct mm_reply_data {
	struct ethnl_reply_data base;
	struct ethtool_mm_state state;
	struct ethtool_mm_stats stats;
};
/* Recover our private reply data from the embedded ethnl base pointer. */
#define MM_REPDATA(__reply_base) \
	container_of(__reply_base, struct mm_reply_data, base)
/* Number of real statistics attributes: everything in the _MM_STAT
 * attribute range except the UNSPEC and PAD placeholders.
 */
#define ETHTOOL_MM_STAT_CNT \
	(__ETHTOOL_A_MM_STAT_CNT - (ETHTOOL_A_MM_STAT_PAD + 1))
/* GET requests carry only the common header; the _stats variant of the
 * header policy allows ETHTOOL_FLAG_STATS to request counters too.
 */
const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1] = {
	[ETHTOOL_A_MM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_stats),
};
  22. static int mm_prepare_data(const struct ethnl_req_info *req_base,
  23. struct ethnl_reply_data *reply_base,
  24. const struct genl_info *info)
  25. {
  26. struct mm_reply_data *data = MM_REPDATA(reply_base);
  27. struct net_device *dev = reply_base->dev;
  28. const struct ethtool_ops *ops;
  29. int ret;
  30. ops = dev->ethtool_ops;
  31. if (!ops->get_mm)
  32. return -EOPNOTSUPP;
  33. ethtool_stats_init((u64 *)&data->stats,
  34. sizeof(data->stats) / sizeof(u64));
  35. ret = ethnl_ops_begin(dev);
  36. if (ret < 0)
  37. return ret;
  38. ret = ops->get_mm(dev, &data->state);
  39. if (ret)
  40. goto out_complete;
  41. if (ops->get_mm_stats && (req_base->flags & ETHTOOL_FLAG_STATS))
  42. ops->get_mm_stats(dev, &data->stats);
  43. out_complete:
  44. ethnl_ops_complete(dev);
  45. return ret;
  46. }
  47. static int mm_reply_size(const struct ethnl_req_info *req_base,
  48. const struct ethnl_reply_data *reply_base)
  49. {
  50. int len = 0;
  51. len += nla_total_size(sizeof(u8)); /* _MM_PMAC_ENABLED */
  52. len += nla_total_size(sizeof(u8)); /* _MM_TX_ENABLED */
  53. len += nla_total_size(sizeof(u8)); /* _MM_TX_ACTIVE */
  54. len += nla_total_size(sizeof(u8)); /* _MM_VERIFY_ENABLED */
  55. len += nla_total_size(sizeof(u8)); /* _MM_VERIFY_STATUS */
  56. len += nla_total_size(sizeof(u32)); /* _MM_VERIFY_TIME */
  57. len += nla_total_size(sizeof(u32)); /* _MM_MAX_VERIFY_TIME */
  58. len += nla_total_size(sizeof(u32)); /* _MM_TX_MIN_FRAG_SIZE */
  59. len += nla_total_size(sizeof(u32)); /* _MM_RX_MIN_FRAG_SIZE */
  60. if (req_base->flags & ETHTOOL_FLAG_STATS)
  61. len += nla_total_size(0) + /* _MM_STATS */
  62. nla_total_size_64bit(sizeof(u64)) * ETHTOOL_MM_STAT_CNT;
  63. return len;
  64. }
  65. static int mm_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
  66. {
  67. if (val == ETHTOOL_STAT_NOT_SET)
  68. return 0;
  69. if (nla_put_u64_64bit(skb, attrtype, val, ETHTOOL_A_MM_STAT_PAD))
  70. return -EMSGSIZE;
  71. return 0;
  72. }
  73. static int mm_put_stats(struct sk_buff *skb,
  74. const struct ethtool_mm_stats *stats)
  75. {
  76. struct nlattr *nest;
  77. nest = nla_nest_start(skb, ETHTOOL_A_MM_STATS);
  78. if (!nest)
  79. return -EMSGSIZE;
  80. if (mm_put_stat(skb, stats->MACMergeFrameAssErrorCount,
  81. ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS) ||
  82. mm_put_stat(skb, stats->MACMergeFrameSmdErrorCount,
  83. ETHTOOL_A_MM_STAT_SMD_ERRORS) ||
  84. mm_put_stat(skb, stats->MACMergeFrameAssOkCount,
  85. ETHTOOL_A_MM_STAT_REASSEMBLY_OK) ||
  86. mm_put_stat(skb, stats->MACMergeFragCountRx,
  87. ETHTOOL_A_MM_STAT_RX_FRAG_COUNT) ||
  88. mm_put_stat(skb, stats->MACMergeFragCountTx,
  89. ETHTOOL_A_MM_STAT_TX_FRAG_COUNT) ||
  90. mm_put_stat(skb, stats->MACMergeHoldCount,
  91. ETHTOOL_A_MM_STAT_HOLD_COUNT))
  92. goto err_cancel;
  93. nla_nest_end(skb, nest);
  94. return 0;
  95. err_cancel:
  96. nla_nest_cancel(skb, nest);
  97. return -EMSGSIZE;
  98. }
  99. static int mm_fill_reply(struct sk_buff *skb,
  100. const struct ethnl_req_info *req_base,
  101. const struct ethnl_reply_data *reply_base)
  102. {
  103. const struct mm_reply_data *data = MM_REPDATA(reply_base);
  104. const struct ethtool_mm_state *state = &data->state;
  105. if (nla_put_u8(skb, ETHTOOL_A_MM_TX_ENABLED, state->tx_enabled) ||
  106. nla_put_u8(skb, ETHTOOL_A_MM_TX_ACTIVE, state->tx_active) ||
  107. nla_put_u8(skb, ETHTOOL_A_MM_PMAC_ENABLED, state->pmac_enabled) ||
  108. nla_put_u8(skb, ETHTOOL_A_MM_VERIFY_ENABLED, state->verify_enabled) ||
  109. nla_put_u8(skb, ETHTOOL_A_MM_VERIFY_STATUS, state->verify_status) ||
  110. nla_put_u32(skb, ETHTOOL_A_MM_VERIFY_TIME, state->verify_time) ||
  111. nla_put_u32(skb, ETHTOOL_A_MM_MAX_VERIFY_TIME, state->max_verify_time) ||
  112. nla_put_u32(skb, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, state->tx_min_frag_size) ||
  113. nla_put_u32(skb, ETHTOOL_A_MM_RX_MIN_FRAG_SIZE, state->rx_min_frag_size))
  114. return -EMSGSIZE;
  115. if (req_base->flags & ETHTOOL_FLAG_STATS &&
  116. mm_put_stats(skb, &data->stats))
  117. return -EMSGSIZE;
  118. return 0;
  119. }
/* SET request attribute policy. Boolean attributes are u8 clamped to
 * 0/1; verify_time is bounded to 1..128 here, with the device-specific
 * maximum enforced later in ethnl_set_mm(); tx_min_frag_size accepts
 * 60..252.
 */
const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1] = {
	[ETHTOOL_A_MM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_MM_VERIFY_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_MM_VERIFY_TIME] = NLA_POLICY_RANGE(NLA_U32, 1, 128),
	[ETHTOOL_A_MM_TX_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_MM_PMAC_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_MM_TX_MIN_FRAG_SIZE] = NLA_POLICY_RANGE(NLA_U32, 60, 252),
};
  128. static void mm_state_to_cfg(const struct ethtool_mm_state *state,
  129. struct ethtool_mm_cfg *cfg)
  130. {
  131. /* We could also compare state->verify_status against
  132. * ETHTOOL_MM_VERIFY_STATUS_DISABLED, but state->verify_enabled
  133. * is more like an administrative state which should be seen in
  134. * ETHTOOL_MSG_MM_GET replies. For example, a port with verification
  135. * disabled might be in the ETHTOOL_MM_VERIFY_STATUS_INITIAL
  136. * if it's down.
  137. */
  138. cfg->verify_enabled = state->verify_enabled;
  139. cfg->verify_time = state->verify_time;
  140. cfg->tx_enabled = state->tx_enabled;
  141. cfg->pmac_enabled = state->pmac_enabled;
  142. cfg->tx_min_frag_size = state->tx_min_frag_size;
  143. }
  144. static int
  145. ethnl_set_mm_validate(struct ethnl_req_info *req_info, struct genl_info *info)
  146. {
  147. const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
  148. return ops->get_mm && ops->set_mm ? 1 : -EOPNOTSUPP;
  149. }
  150. static int ethnl_set_mm(struct ethnl_req_info *req_info, struct genl_info *info)
  151. {
  152. struct netlink_ext_ack *extack = info->extack;
  153. struct net_device *dev = req_info->dev;
  154. struct ethtool_mm_state state = {};
  155. struct nlattr **tb = info->attrs;
  156. struct ethtool_mm_cfg cfg = {};
  157. bool mod = false;
  158. int ret;
  159. ret = dev->ethtool_ops->get_mm(dev, &state);
  160. if (ret)
  161. return ret;
  162. mm_state_to_cfg(&state, &cfg);
  163. ethnl_update_bool(&cfg.verify_enabled, tb[ETHTOOL_A_MM_VERIFY_ENABLED],
  164. &mod);
  165. ethnl_update_u32(&cfg.verify_time, tb[ETHTOOL_A_MM_VERIFY_TIME], &mod);
  166. ethnl_update_bool(&cfg.tx_enabled, tb[ETHTOOL_A_MM_TX_ENABLED], &mod);
  167. ethnl_update_bool(&cfg.pmac_enabled, tb[ETHTOOL_A_MM_PMAC_ENABLED],
  168. &mod);
  169. ethnl_update_u32(&cfg.tx_min_frag_size,
  170. tb[ETHTOOL_A_MM_TX_MIN_FRAG_SIZE], &mod);
  171. if (!mod)
  172. return 0;
  173. if (cfg.verify_time > state.max_verify_time) {
  174. NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MM_VERIFY_TIME],
  175. "verifyTime exceeds device maximum");
  176. return -ERANGE;
  177. }
  178. if (cfg.verify_enabled && !cfg.tx_enabled) {
  179. NL_SET_ERR_MSG(extack, "Verification requires TX enabled");
  180. return -EINVAL;
  181. }
  182. if (cfg.tx_enabled && !cfg.pmac_enabled) {
  183. NL_SET_ERR_MSG(extack, "TX enabled requires pMAC enabled");
  184. return -EINVAL;
  185. }
  186. ret = dev->ethtool_ops->set_mm(dev, &cfg, extack);
  187. return ret < 0 ? ret : 1;
  188. }
/* Glue binding the MM GET/SET handlers into the generic ethnl core. */
const struct ethnl_request_ops ethnl_mm_request_ops = {
	.request_cmd = ETHTOOL_MSG_MM_GET,
	.reply_cmd = ETHTOOL_MSG_MM_GET_REPLY,
	.hdr_attr = ETHTOOL_A_MM_HEADER,
	.req_info_size = sizeof(struct mm_req_info),
	.reply_data_size = sizeof(struct mm_reply_data),
	.prepare_data = mm_prepare_data,
	.reply_size = mm_reply_size,
	.fill_reply = mm_fill_reply,
	.set_validate = ethnl_set_mm_validate,
	.set = ethnl_set_mm,
	/* Notification emitted after a successful SET (ethnl_set_mm()
	 * returns 1).
	 */
	.set_ntf_cmd = ETHTOOL_MSG_MM_NTF,
};
  202. /* Returns whether a given device supports the MAC merge layer
  203. * (has an eMAC and a pMAC). Must be called under rtnl_lock() and
  204. * ethnl_ops_begin().
  205. */
  206. bool __ethtool_dev_mm_supported(struct net_device *dev)
  207. {
  208. const struct ethtool_ops *ops = dev->ethtool_ops;
  209. struct ethtool_mm_state state = {};
  210. int ret = -EOPNOTSUPP;
  211. if (ops && ops->get_mm)
  212. ret = ops->get_mm(dev, &state);
  213. return !ret;
  214. }
  215. bool ethtool_dev_mm_supported(struct net_device *dev)
  216. {
  217. const struct ethtool_ops *ops = dev->ethtool_ops;
  218. bool supported;
  219. int ret;
  220. ASSERT_RTNL();
  221. if (!ops)
  222. return false;
  223. ret = ethnl_ops_begin(dev);
  224. if (ret < 0)
  225. return false;
  226. supported = __ethtool_dev_mm_supported(dev);
  227. ethnl_ops_complete(dev);
  228. return supported;
  229. }
  230. EXPORT_SYMBOL_GPL(ethtool_dev_mm_supported);