i40evf_virtchnl.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40evf.h"
#include "i40e_prototype.h"
#include "i40evf_client.h"

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50

/**
 * i40evf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status on failure.
 **/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
			      enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw *hw = &adapter->hw;
	i40e_status err;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (err)
		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
			op, i40evf_stat_str(hw, err),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
	return err;
}

/**
 * i40evf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
	struct virtchnl_version_info vvi;

	vvi.major = VIRTCHNL_VERSION_MAJOR;
	vvi.minor = VIRTCHNL_VERSION_MINOR;

	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				  sizeof(vvi));
}

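/* Illustrative init-flow sketch (an assumption, not code from this file):
 * the version handshake is a two-step exchange driven by the init task in
 * i40evf_main.c, roughly:
 *
 *	err = i40evf_send_api_ver(adapter);
 *	...wait for the PF's AdminQ reply...
 *	err = i40evf_verify_api_ver(adapter);
 *
 * i40evf_verify_api_ver() below does the actual compare of the PF's
 * virtchnl_version_info against VIRTCHNL_VERSION_MAJOR/MINOR.
 */
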
/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
	struct virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	i40e_status err;

	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		err = i40evf_clean_arq_element(hw, &event, NULL);
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		if (err)
			goto out_alloc;
		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_VERSION)
			break;
	}

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err)
		goto out_alloc;

	if (op != VIRTCHNL_OP_VERSION) {
		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
			 op);
		err = -EIO;
		goto out_alloc;
	}

	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
	adapter->pf_version = *pf_vvi;

	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
		err = -EIO;

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * i40evf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
	u32 caps;

	caps = VIRTCHNL_VF_OFFLOAD_L2 |
	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
	       VIRTCHNL_VF_OFFLOAD_VLAN |
	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
	       VIRTCHNL_VF_OFFLOAD_ENCAP |
	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
	       VIRTCHNL_VF_OFFLOAD_ADQ;

	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
	if (PF_IS_V11(adapter))
		return i40evf_send_pf_msg(adapter,
					  VIRTCHNL_OP_GET_VF_RESOURCES,
					  (u8 *)&caps, sizeof(caps));
	else
		return i40evf_send_pf_msg(adapter,
					  VIRTCHNL_OP_GET_VF_RESOURCES,
					  NULL, 0);
}

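/* Hedged usage note: once VIRTCHNL_OP_GET_VF_RESOURCES completes, callers
 * typically gate features on the capabilities the PF actually granted,
 * e.g. (sketch; vf_cap_flags is assumed to be the field name in this
 * kernel's virtchnl.h, verify when backporting):
 *
 *	if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
 *		...ADq/tc channel ops are safe to issue...
 */
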
/**
 * i40evf_validate_num_queues
 * @adapter: adapter structure
 *
 * Validate that the number of queues the PF has sent in
 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
 **/
static void i40evf_validate_num_queues(struct i40evf_adapter *adapter)
{
	if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) {
		struct virtchnl_vsi_resource *vsi_res;
		int i;

		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
			 adapter->vf_res->num_queue_pairs,
			 I40EVF_MAX_REQ_QUEUES);
		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
			 I40EVF_MAX_REQ_QUEUES);
		adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES;
		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
			vsi_res = &adapter->vf_res->vsi_res[i];
			vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES;
		}
	}
}

/**
 * i40evf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	i40e_status err;
	u16 len;

	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = i40evf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		i40evf_validate_num_queues(adapter);
	i40e_vf_parse_hw_config(hw, adapter->vf_res);

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

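/* Buffer-sizing note (illustrative): struct virtchnl_vf_resource carries a
 * trailing vsi_res[] array (declared with one element in this era's
 * virtchnl.h), so the reply buffer above reserves room for the maximum:
 *
 *	len = sizeof(struct virtchnl_vf_resource) +
 *	      I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
 *
 * and the memcpy() is clamped with min(event.msg_len, len) in case the PF
 * replies with more data than we allocated.
 */
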
/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->num_active_queues;
	int i, len, max_frame = I40E_MAX_RXBUFFER;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = sizeof(struct virtchnl_vsi_queue_config_info) +
	      (sizeof(struct virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Limit maximum frame size when jumbo frames are not enabled */
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
	    (adapter->netdev->mtu <= ETH_DATA_LEN))
		max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		vqpi->rxq.databuffer_size =
			ALIGN(adapter->rx_rings[i].rx_buf_len,
			      BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
		vqpi++;
	}

	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
}

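/* Rx sizing note (illustrative): with a standard 1500-byte MTU and
 * legacy-rx disabled, max_pkt_size above becomes
 * I40E_RXBUFFER_1536 - NET_IP_ALIGN. databuffer_size is rounded up to the
 * hardware's 128-byte granularity (I40E_RXQ_CTX_DBUFF_SHIFT is 7 in this
 * driver family; treat that constant as an assumption when backporting):
 *
 *	ALIGN(rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT))
 *
 * e.g. a 1536-byte buffer stays 1536, while a hypothetical 1600-byte
 * buffer would round up to 1664.
 */
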
/**
 * i40evf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

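/* Queue-mask example (illustrative): tx_queues/rx_queues are bitmaps of
 * queue ids, so with four active queues:
 *
 *	BIT(4) - 1 == 0xf	(selects queues 0-3)
 *
 * The same mask construction is used by i40evf_disable_queues() below.
 */
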
/**
 * i40evf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = sizeof(struct virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
	       sizeof(struct virtchnl_vector_map));
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = I40E_RX_ITR;
		vecmap->txitr_idx = I40E_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
}

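/* Vector-layout sketch (illustrative): with num_msix_vectors == 5 and
 * NONQ_VECS == 1, the loop above fills vecmap[0..3] with vector ids 1..4
 * (one per q_vector, carrying that vector's ring_mask), and the final
 * entry pins vector 0 to the AdminQ/misc cause with empty queue maps.
 */
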
/**
 * i40evf_request_queues
 * @adapter: adapter structure
 * @num: number of requested queues
 *
 * We get a default number of queues from the PF. This enables us to request a
 * different number. Returns 0 on success, negative on failure.
 **/
int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
{
	struct virtchnl_vf_res_request vfres;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	vfres.num_queue_pairs = num;

	adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
				  (u8 *)&vfres, sizeof(vfres));
}

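/* Hedged caller sketch (an assumption; the expected entry point is the
 * ethtool channel path in i40evf_ethtool.c):
 *
 *	adapter->num_req_queues = num;
 *	err = i40evf_request_queues(adapter, num);
 *
 * The PF's answer arrives as VIRTCHNL_OP_REQUEST_QUEUES and is reconciled
 * against num_req_queues in i40evf_virtchnl_completion() below.
 */
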
/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	int len, i = 0, count = 0;
	struct i40evf_mac_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}

	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}

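/* Chunking math (illustrative): one AdminQ message is capped at
 * I40EVF_MAX_AQ_BUF_SIZE (4096 bytes in this driver), so the largest
 * batch per message is roughly:
 *
 *	(I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) /
 *		sizeof(struct virtchnl_ether_addr)
 *
 * When the list overflows, "more" stays true and the AQ_ADD_MAC_FILTER
 * flag is left set so the watchdog re-issues this op until the backlog
 * drains. The delete path below and the VLAN paths use the same pattern.
 */
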
/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct i40evf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}

	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}

/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct i40evf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}

	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}

/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	struct i40evf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}

	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}

/**
 * i40evf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous mode
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
	struct virtchnl_promisc_info vpi;
	int promisc_all;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	promisc_all = FLAG_VF_UNICAST_PROMISC |
		      FLAG_VF_MULTICAST_PROMISC;
	if ((flags & promisc_all) == promisc_all) {
		adapter->flags |= I40EVF_FLAG_PROMISC_ON;
		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	if (flags & FLAG_VF_MULTICAST_PROMISC) {
		adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
	}

	if (!flags) {
		adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
				    I40EVF_FLAG_ALLMULTI_ON);
		adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
					  I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	}

	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			   (u8 *)&vpi, sizeof(vpi));
}

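/* Flag-combination examples (illustrative):
 *
 *	i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
 *				FLAG_VF_MULTICAST_PROMISC); /" full promisc "/
 *	i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); (allmulti)
 *	i40evf_set_promiscuous(adapter, 0); (leave both modes)
 */
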
/**
 * i40evf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* no error message, this isn't crucial */
		return;
	}
	adapter->current_op = VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* queue maps are ignored for this message - only the vsi is used */
	if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
			       (u8 *)&vqs, sizeof(vqs)))
		/* if the request failed, don't lock out others */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_get_hena
 * @adapter: adapter structure
 *
 * Request hash enable capabilities from PF
 **/
void i40evf_get_hena(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
			   NULL, 0);
}

/**
 * i40evf_set_hena
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash capabilities
 **/
void i40evf_set_hena(struct i40evf_adapter *adapter)
{
	struct virtchnl_rss_hena vrh;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
			adapter->current_op);
		return;
	}
	vrh.hena = adapter->hena;
	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
			   (u8 *)&vrh, sizeof(vrh));
}

/**
 * i40evf_set_rss_key
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash key
 **/
void i40evf_set_rss_key(struct i40evf_adapter *adapter)
{
	struct virtchnl_rss_key *vrk;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
			adapter->current_op);
		return;
	}
	len = sizeof(struct virtchnl_rss_key) +
	      (adapter->rss_key_size * sizeof(u8)) - 1;
	vrk = kzalloc(len, GFP_KERNEL);
	if (!vrk)
		return;
	vrk->vsi_id = adapter->vsi.id;
	vrk->key_len = adapter->rss_key_size;
	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);

	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
			   (u8 *)vrk, len);
	kfree(vrk);
}

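/* Length math (illustrative): struct virtchnl_rss_key ends in a one-byte
 * key[1] placeholder, so the "- 1" above keeps that byte from being
 * counted twice:
 *
 *	len = sizeof(struct virtchnl_rss_key) + rss_key_size - 1;
 *
 * i40evf_set_rss_lut() below sizes struct virtchnl_rss_lut the same way.
 */
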
/**
 * i40evf_set_rss_lut
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS lookup table
 **/
void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
{
	struct virtchnl_rss_lut *vrl;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
			adapter->current_op);
		return;
	}
	len = sizeof(struct virtchnl_rss_lut) +
	      (adapter->rss_lut_size * sizeof(u8)) - 1;
	vrl = kzalloc(len, GFP_KERNEL);
	if (!vrl)
		return;
	vrl->vsi_id = adapter->vsi.id;
	vrl->lut_entries = adapter->rss_lut_size;
	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);

	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
			   (u8 *)vrl, len);
	kfree(vrl);
}

/**
 * i40evf_enable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be enabled
 **/
void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
			   NULL, 0);
}

/**
 * i40evf_disable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be disabled
 **/
void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
			   NULL, 0);
}

/**
 * i40evf_print_link_message - print link up or down
 * @adapter: adapter structure
 *
 * Log a message telling the world of our wondrous link status
 */
static void i40evf_print_link_message(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	char *speed = "Unknown ";

	if (!adapter->link_up) {
		netdev_info(netdev, "NIC Link is Down\n");
		return;
	}

	switch (adapter->link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
}

/**
 * i40evf_enable_channels
 * @adapter: adapter structure
 *
 * Request that the PF enable channels as specified by
 * the user via tc tool.
 **/
void i40evf_enable_channels(struct i40evf_adapter *adapter)
{
	struct virtchnl_tc_info *vti = NULL;
	u16 len;
	int i;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
	       sizeof(struct virtchnl_tc_info);

	vti = kzalloc(len, GFP_KERNEL);
	if (!vti)
		return;
	vti->num_tc = adapter->num_tc;
	for (i = 0; i < vti->num_tc; i++) {
		vti->list[i].count = adapter->ch_config.ch_info[i].count;
		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
		vti->list[i].pad = 0;
		vti->list[i].max_tx_rate =
				adapter->ch_config.ch_info[i].max_tx_rate;
	}

	adapter->ch_config.state = __I40EVF_TC_RUNNING;
	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
			   (u8 *)vti, len);
	kfree(vti);
}

/**
 * i40evf_disable_channels
 * @adapter: adapter structure
 *
 * Request that the PF disable channels that are configured
 **/
void i40evf_disable_channels(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->ch_config.state = __I40EVF_TC_INVALID;
	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
			   NULL, 0);
}

/**
 * i40evf_print_cloud_filter
 * @adapter: adapter structure
 * @f: cloud filter to print
 *
 * Print the cloud filter
 **/
static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
				      struct virtchnl_filter *f)
{
	switch (f->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip[0],
			 &f->data.tcp_spec.src_ip[0],
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip,
			 &f->data.tcp_spec.src_ip,
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	}
}

/**
 * i40evf_add_cloud_filter
 * @adapter: adapter structure
 *
 * Request that the PF add cloud filters as specified
 * by the user via tc tool.
 **/
void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
{
	struct i40evf_cloud_filter *cf;
	struct virtchnl_filter *f;
	int len = 0, count = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
			adapter->current_op);
		return;
	}
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->add) {
			count++;
			break;
		}
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;

	len = sizeof(struct virtchnl_filter);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->add) {
			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
			cf->add = false;
			cf->state = __I40EVF_CF_ADD_PENDING;
			i40evf_send_pf_msg(adapter,
					   VIRTCHNL_OP_ADD_CLOUD_FILTER,
					   (u8 *)f, len);
		}
	}
	kfree(f);
}

/**
 * i40evf_del_cloud_filter
 * @adapter: adapter structure
 *
 * Request that the PF delete cloud filters as specified
 * by the user via tc tool.
 **/
void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
{
	struct i40evf_cloud_filter *cf, *cftmp;
	struct virtchnl_filter *f;
	int len = 0, count = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
			adapter->current_op);
		return;
	}
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->del) {
			count++;
			break;
		}
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;

	len = sizeof(struct virtchnl_filter);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		if (cf->del) {
			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
			cf->del = false;
			cf->state = __I40EVF_CF_DEL_PENDING;
			i40evf_send_pf_msg(adapter,
					   VIRTCHNL_OP_DEL_CLOUD_FILTER,
					   (u8 *)f, len);
		}
	}
	kfree(f);
}

/**
 * i40evf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

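/* Dispatch sketch (an assumption, not code from this file): the AdminQ
 * task in i40evf_main.c drains the ARQ and feeds each event to the
 * completion handler below, roughly:
 *
 *	op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
 *	ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
 *	i40evf_virtchnl_completion(adapter, op, ret,
 *				   event.msg_buf, event.msg_len);
 */
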
/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
		bool link_up = vpe->event_data.link_event.link_status;

		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_speed =
				vpe->event_data.link_event.link_speed;

			/* we've already got the right link status, bail */
			if (adapter->link_up == link_up)
				break;

			if (link_up) {
				/* If we get link up message and start queues
				 * before our queues are configured it will
				 * trigger a TX hang. In that case, just ignore
				 * the link status message; we'll get another
				 * one after we enable queues and are actually
				 * prepared to send traffic.
				 */
				if (adapter->state != __I40EVF_RUNNING)
					break;

				/* For ADq enabled VF, we reconfigure VSIs and
				 * re-allocate queues. Hence wait till all
				 * queues are enabled.
				 */
				if (adapter->flags &
				    I40EVF_FLAG_QUEUES_DISABLED)
					break;
			}

			adapter->link_up = link_up;
			if (link_up) {
				netif_tx_start_all_queues(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_tx_stop_all_queues(netdev);
				netif_carrier_off(netdev);
			}
			i40evf_print_link_message(adapter);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		return;
	}

	if (v_retval) {
		switch (v_opcode) {
		case VIRTCHNL_OP_ADD_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ADD_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ENABLE_CHANNELS:
			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __I40EVF_TC_INVALID;
			netdev_reset_tc(netdev);
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_DISABLE_CHANNELS:
			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __I40EVF_TC_RUNNING;
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
			struct i40evf_cloud_filter *cf, *cftmp;

			list_for_each_entry_safe(cf, cftmp,
						 &adapter->cloud_filter_list,
						 list) {
				if (cf->state == __I40EVF_CF_ADD_PENDING) {
					cf->state = __I40EVF_CF_INVALID;
					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
						 i40evf_stat_str(&adapter->hw,
								 v_retval));
					i40evf_print_cloud_filter(adapter,
								  &cf->f);
					list_del(&cf->list);
					kfree(cf);
					adapter->num_cloud_filters--;
				}
			}
			}
			break;
		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
			struct i40evf_cloud_filter *cf;

			list_for_each_entry(cf, &adapter->cloud_filter_list,
					    list) {
				if (cf->state == __I40EVF_CF_DEL_PENDING) {
					cf->state = __I40EVF_CF_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
						 i40evf_stat_str(&adapter->hw,
								 v_retval));
					i40evf_print_cloud_filter(adapter,
								  &cf->f);
				}
			}
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval,
				i40evf_stat_str(&adapter->hw, v_retval),
				v_opcode);
		}
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  I40E_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
		memcpy(adapter->vf_res, msg, min(msglen, len));
		i40evf_validate_num_queues(adapter);
		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		/* restore current mac address */
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		i40evf_process_config(adapter);
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
		if (adapter->state == __I40EVF_DOWN_PENDING) {
			adapter->state = __I40EVF_DOWN;
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Gobble zero-length replies from the PF. They indicate that
		 * a previous message was received OK, and the client doesn't
		 * care about that.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			i40evf_notify_client_message(&adapter->vsi,
						     msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
			~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;

		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
		}
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES: {
		struct virtchnl_vf_res_request *vfres =
			(struct virtchnl_vf_res_request *)msg;

		if (vfres->num_queue_pairs != adapter->num_req_queues) {
			dev_info(&adapter->pdev->dev,
				 "Requested %d queues, PF can support %d\n",
				 adapter->num_req_queues,
				 vfres->num_queue_pairs);
			adapter->num_req_queues = 0;
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
		}
		}
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct i40evf_cloud_filter *cf;

		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __I40EVF_CF_ADD_PENDING)
				cf->state = __I40EVF_CF_ACTIVE;
		}
		}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct i40evf_cloud_filter *cf, *cftmp;

		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __I40EVF_CF_DEL_PENDING) {
				cf->state = __I40EVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
		}
		break;
	default:
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}