rings.c

// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};

struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
	u32				supported_ring_params;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
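
/* Policy for RINGS_GET requests: only the common request header nest is
 * accepted; all ring parameters are reply-only for this command.
 */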
const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER] =
		NLA_POLICY_NESTED(ethnl_header_policy),
};
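
/* Snapshot the device's current ring configuration through the driver's
 * get_ringparam() op, inside the ethnl_ops_begin()/ethnl_ops_complete()
 * bracket that guards against running ops on an absent or suspended device.
 */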
static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      const struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;

	data->supported_ring_params = dev->ethtool_ops->supported_ring_params;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, info->extack);
	ethnl_ops_complete(dev);

	return 0;
}
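
/* Worst-case reply size: one attribute per ring parameter.
 * rings_fill_reply() may emit fewer attributes, never more.
 */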
static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8))  +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8))  +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8))  +	/* _RINGS_RX_PUSH */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN */
	       nla_total_size(sizeof(u32));	/* _RINGS_TX_PUSH_BUF_LEN_MAX */
}
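
/* Emit the parameters gathered by rings_prepare_data(). Parameters whose
 * reported maximum (or value) is zero are treated as unsupported and skipped;
 * the TX/RX push flags are always reported, and the TX push buffer length is
 * reported only when the driver declares ETHTOOL_RING_USE_TX_PUSH_BUF_LEN.
 */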
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;
	u32 supported_ring_params = data->supported_ring_params;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
			  kr->tx_push_buf_max_len) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
			  kr->tx_push_buf_len))))
		return -EMSGSIZE;

	return 0;
}
/* RINGS_SET */

const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER] =
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT]	=
		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
};
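
/* Reject attributes the driver did not declare support for in
 * ethtool_ops->supported_ring_params, pointing extack at the offending
 * attribute. Returns 1 if the set operation may proceed.
 */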
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
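
/* Apply a RINGS_SET request: read the current settings, fold in the
 * attributes present in the request, range-check the result against the
 * driver-reported maxima, then hand it to set_ringparam(). Returns 1 if
 * anything was changed, 0 if the request was a no-op.
 */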
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	dev->ethtool_ops->get_ringparam(dev, &ringparam,
					&kernel_ringparam, info->extack);

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u8(&kernel_ringparam.tcp_data_split,
			tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
			 tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
	if (!mod)
		return 0;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack,
					tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
					"Requested TX push buffer exceeds the maximum of %u",
					kernel_ringparam.tx_push_buf_max_len);
		return -EINVAL;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	return ret < 0 ? ret : 1;
}
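
/* Glue for the generic ethtool netlink request handling; roughly what
 * "ethtool -g/-G <dev>" exercises through the netlink interface.
 */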
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};