// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"

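/**
 * struct ice_stats - statistic descriptor for ethtool -S output
 * @stat_string: name reported to user space, at most ETH_GSTRING_LEN bytes
 * @sizeof_stat: size of the underlying counter (u32 or u64)
 * @stat_offset: byte offset of the counter within its containing structure
 */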
struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)

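/**
 * ice_q_stats_len - number of per-queue statistics for a netdev
 * @netdev: network interface device structure
 *
 * Each allocated Tx and Rx queue contributes one u64 counter per member
 * of struct ice_q_stats (packets and bytes).
 */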
static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
		(sizeof(struct ice_q_stats) / sizeof(u64)));
}

#define ICE_PF_STATS_LEN	ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN	ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
				 ice_q_stats_len(n))

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
static struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
	ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
	ICE_PF_STAT("tx_size_64", stats.tx_size_64),
	ICE_PF_STAT("rx_size_64", stats.rx_size_64),
	ICE_PF_STAT("tx_size_127", stats.tx_size_127),
	ICE_PF_STAT("rx_size_127", stats.rx_size_127),
	ICE_PF_STAT("tx_size_255", stats.tx_size_255),
	ICE_PF_STAT("rx_size_255", stats.rx_size_255),
	ICE_PF_STAT("tx_size_511", stats.tx_size_511),
	ICE_PF_STAT("rx_size_511", stats.rx_size_511),
	ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_big", stats.tx_size_big),
	ICE_PF_STAT("rx_size_big", stats.rx_size_big),
	ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
	ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
	ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
	ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
	ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};

static u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
};

/**
 * ice_nvm_version_str - format the NVM version strings
 * @hw: ptr to the hardware info
 */
static char *ice_nvm_version_str(struct ice_hw *hw)
{
	static char buf[ICE_ETHTOOL_FWVER_LEN];
	u8 ver, patch;
	u32 full_ver;
	u16 build;

	full_ver = hw->nvm.oem_ver;
	ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
		      ICE_OEM_VER_BUILD_SHIFT);
	patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);

	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
		 (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
		 (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
		 hw->nvm.eetrack, ver, build, patch);

	return buf;
}

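/**
 * ice_get_drvinfo - report driver, firmware and bus information
 * @netdev: network interface device structure
 * @drvinfo: ethtool driver info structure to fill in
 */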
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

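/**
 * ice_get_regs_len - size in bytes of the register dump buffer
 * @netdev: network interface device structure (unused)
 */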
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return sizeof(ice_regs_dump_list);
}

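/**
 * ice_get_regs - dump a fixed list of device registers
 * @netdev: network interface device structure
 * @regs: ethtool register dump request
 * @p: output buffer, one u32 per entry in ice_regs_dump_list
 */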
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	int i;

	regs->version = 1;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}

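/**
 * ice_get_msglevel - report the driver's message level
 * @netdev: network interface device structure
 */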
static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

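/**
 * ice_set_msglevel - set the driver (or hardware) message level
 * @netdev: network interface device structure
 * @data: message level bitmap; when CONFIG_DYNAMIC_DEBUG is not set,
 *	  ICE_DBG_USER bits select the hardware debug mask instead
 */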
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

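/**
 * ice_get_strings - fill in the strings for a given string set
 * @netdev: network interface device structure
 * @stringset: string set ID (only ETH_SS_STATS is supported)
 * @data: buffer for ETH_GSTRING_LEN sized strings
 */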
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_vsi_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_txq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "tx-queue-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_rxq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "rx-queue-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "port.%s",
				 ice_gstrings_pf_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		break;
	}
}

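/**
 * ice_get_sset_count - number of strings in a given string set
 * @netdev: network interface device structure
 * @sset: string set ID
 */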
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		/* The number (and order) of strings reported *must* remain
		 * constant for a given netdevice. This function must not
		 * report a different number based on run time parameters
		 * (such as the number of queues in use, or the setting of
		 * a private ethtool flag). This is due to the nature of the
		 * ethtool stats API.
		 *
		 * User space programs such as ethtool must make 3 separate
		 * ioctl requests, one for size, one for the strings, and
		 * finally one for the stats. Since these cross into
		 * user space, changes to the number or size could result in
		 * undefined memory access or incorrect string<->value
		 * correlations for statistics.
		 *
		 * Even if it appears to be safe, changes to the size or
		 * order of strings will suffer from race conditions and are
		 * not safe.
		 */
		return ICE_ALL_STATS_LEN(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

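/**
 * ice_get_ethtool_stats - copy VSI, per-queue and PF counters to user space
 * @netdev: network interface device structure
 * @stats: ethtool stats request (unused)
 * @data: output buffer, one u64 per string reported by ice_get_strings()
 */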
static void
ice_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats __always_unused *stats, u64 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	unsigned int j = 0;
	int i = 0;
	char *p;

	for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
		data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate per queue stats */
	rcu_read_lock();

	ice_for_each_alloc_txq(vsi, j) {
		ring = READ_ONCE(vsi->tx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	ice_for_each_alloc_rxq(vsi, j) {
		ring = READ_ONCE(vsi->rx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	rcu_read_unlock();

	if (vsi->type != ICE_VSI_PF)
		return;

	for (j = 0; j < ICE_PF_STATS_LEN; j++) {
		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}

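/**
 * ice_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings
 *
 * Reports speed/duplex settings based on media_type
 */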
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	bool link_up;

	hw_link_info = &vsi->port_info->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	ethtool_link_ksettings_add_link_mode(ks, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising,
					     10000baseT_Full);

	/* set speed and duplex */
	if (link_up) {
		switch (hw_link_info->link_speed) {
		case ICE_AQ_LINK_SPEED_100MB:
			ks->base.speed = SPEED_100;
			break;
		case ICE_AQ_LINK_SPEED_2500MB:
			ks->base.speed = SPEED_2500;
			break;
		case ICE_AQ_LINK_SPEED_5GB:
			ks->base.speed = SPEED_5000;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			ks->base.speed = SPEED_10000;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			ks->base.speed = SPEED_25000;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
			ks->base.speed = SPEED_40000;
			break;
		default:
			ks->base.speed = SPEED_UNKNOWN;
			break;
		}
		ks->base.duplex = DUPLEX_FULL;
	} else {
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	/* set autoneg settings */
	ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	switch (vsi->port_info->fc.req_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
	default:
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	}

	return 0;
}

/**
 * ice_get_rxnfc - command to get Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns Success if the command is supported.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vsi->rss_size;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

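/**
 * ice_get_ringparam - report the current Tx/Rx ring sizes
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill in
 */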
static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ring->rx_max_pending = ICE_MAX_NUM_DESC;
	ring->tx_max_pending = ICE_MAX_NUM_DESC;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;

	/* Rx mini and jumbo rings are not supported */
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

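/**
 * ice_set_ringparam - change the Tx/Rx descriptor counts
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 *
 * Replacement rings are allocated at the new size before the old ones
 * are freed, so a failed allocation leaves the interface usable.
 */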
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	u32 new_rx_cnt, new_tx_cnt;

	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
			   ICE_REQ_DESC_MULTIPLE);
		return -EINVAL;
	}

	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_tx_cnt != ring->tx_pending)
		netdev_info(netdev,
			    "Requested Tx descriptor count rounded up to %d\n",
			    new_tx_cnt);
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_rx_cnt != ring->rx_pending)
		netdev_info(netdev,
			    "Requested Rx descriptor count rounded up to %d\n",
			    new_rx_cnt);

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		for (i = 0; i < vsi->alloc_txq; i++)
			vsi->tx_rings[i]->count = new_tx_cnt;
		for (i = 0; i < vsi->alloc_rxq; i++)
			vsi->rx_rings[i]->count = new_rx_cnt;
		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
		goto done;
	}

	if (new_tx_cnt == vsi->tx_rings[0]->count)
		goto process_rx;

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);
	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!tx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_txq; i++) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&tx_rings[i]);
		if (err) {
			while (i) {
				i--;
				ice_clean_tx_ring(&tx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
			goto done;
		}
	}

process_rx:
	if (new_rx_cnt == vsi->rx_rings[0]->count)
		goto process_link;

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);
	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!rx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_buf = NULL;
		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);
		if (err)
			goto rx_unwind;

		/* allocate Rx buffers */
		err = ice_alloc_rx_bufs(&rx_rings[i],
					ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
		if (err) {
			while (i) {
				i--;
				ice_free_rx_ring(&rx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
			err = -ENOMEM;
			goto free_tx;
		}
	}

process_link:
	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. If the VSI is up, bring it down and then back up.
	 */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		ice_down(vsi);

		if (tx_rings) {
			for (i = 0; i < vsi->alloc_txq; i++) {
				ice_free_tx_ring(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
		}

		if (rx_rings) {
			for (i = 0; i < vsi->alloc_rxq; i++) {
				ice_free_rx_ring(vsi->rx_rings[i]);
				/* copy the real tail offset */
				rx_rings[i].tail = vsi->rx_rings[i]->tail;
				/* this is to fake out the allocation routine
				 * into thinking it has to realloc everything
				 * but the recycling logic will let us re-use
				 * the buffers allocated above
				 */
				rx_rings[i].next_to_use = 0;
				rx_rings[i].next_to_clean = 0;
				rx_rings[i].next_to_alloc = 0;
				*vsi->rx_rings[i] = rx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
		}

		ice_up(vsi);
	}
	goto done;

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++)
			ice_free_tx_ring(&tx_rings[i]);
		devm_kfree(&pf->pdev->dev, tx_rings);
	}

done:
	clear_bit(__ICE_CFG_BUSY, pf->state);
	return err;
}

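/**
 * ice_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 */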
static int ice_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation */
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;
	bool link_up;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
	if (status) {
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 */
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi;

	pi = np->vsi->port_info;
	pause->autoneg =
		((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
		 AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control parameters
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
	enum ice_status status;
	u8 aq_failures;
	bool link_up;
	int err = 0;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
	 * PF VSI
	 */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work */
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_NONE;
	else
		return -EINVAL;

	/* Set the FC mode and only restart AN if link is up */
	status = ice_set_fc(pi, &aq_failures, link_up);

	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	}

	if (!test_bit(__ICE_DOWN, pf->state)) {
		/* Give it a little more time to try to come back. If still
		 * down, restart autoneg link or reinitialize the interface.
		 */
		msleep(75);
		if (!test_bit(__ICE_DOWN, pf->state))
			return ice_nway_reset(netdev);

		ice_down(vsi);
		ice_up(vsi);
	}

	return err;
}

/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}

/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
}

/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Reads the indirection table directly from the hardware.
 */
static int
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int ret = 0, i;
	u8 *lut;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported; return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < vsi->rss_table_size; i++)
		indir[i] = (u32)(lut[i]);

out:
	devm_kfree(&pf->pdev->dev, lut);
	return ret;
}

/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table.
 */
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 *seed = NULL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported; return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	if (key) {
		if (!vsi->rss_hkey_user) {
			vsi->rss_hkey_user =
				devm_kzalloc(&pf->pdev->dev,
					     ICE_VSIQF_HKEY_ARRAY_SIZE,
					     GFP_KERNEL);
			if (!vsi->rss_hkey_user)
				return -ENOMEM;
		}
		memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
		seed = vsi->rss_hkey_user;
	}

	if (!vsi->rss_lut_user) {
		vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
						 vsi->rss_table_size,
						 GFP_KERNEL);
		if (!vsi->rss_lut_user)
			return -ENOMEM;
	}

	/* Each 32 bit value in 'indir' is stored as an 8 bit LUT entry */
	if (indir) {
		int i;

		for (i = 0; i < vsi->rss_table_size; i++)
			vsi->rss_lut_user[i] = (u8)(indir[i]);
	} else {
		ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
				 vsi->rss_size);
	}

	if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
		return -EIO;

	return 0;
}

static const struct ethtool_ops ice_ethtool_ops = {
	.get_link_ksettings = ice_get_link_ksettings,
	.get_drvinfo = ice_get_drvinfo,
	.get_regs_len = ice_get_regs_len,
	.get_regs = ice_get_regs,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = ice_get_strings,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_sset_count = ice_get_sset_count,
	.get_rxnfc = ice_get_rxnfc,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_pauseparam = ice_get_pauseparam,
	.set_pauseparam = ice_set_pauseparam,
	.get_rxfh_key_size = ice_get_rxfh_key_size,
	.get_rxfh_indir_size = ice_get_rxfh_indir_size,
	.get_rxfh = ice_get_rxfh,
	.set_rxfh = ice_set_rxfh,
};

/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * setup netdev ethtool ops with ice specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_ops;
}