i40iw_main.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058
  1. /*******************************************************************************
  2. *
  3. * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenFabrics.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. *
  33. *******************************************************************************/
  34. #include <linux/module.h>
  35. #include <linux/moduleparam.h>
  36. #include <linux/netdevice.h>
  37. #include <linux/etherdevice.h>
  38. #include <linux/ip.h>
  39. #include <linux/tcp.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/addrconf.h>
  42. #include "i40iw.h"
  43. #include "i40iw_register.h"
  44. #include <net/netevent.h>
/* Version of the i40e client interface this driver was built against */
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

/* Driver version string, assembled from the three numeric components */
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

/* Debug flag mask; 0644 permits runtime changes via sysfs */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

/* HMC resource profile selection, consumed in i40iw_create_cqp() */
static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

/* Upper bound on RDMA-capable VFs; fed to cqp_init_info.enabled_vf_count */
static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");

static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

/* Global list of per-port handlers, protected by i40iw_handler_lock.
 * NOTE(review): the spinlock is not initialized in this chunk — presumably
 * spin_lock_init() runs in module init outside this view; verify.
 */
static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

/* Notifier blocks for address/route/netdev events; callbacks defined elsewhere */
static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};
static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};
static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};
static struct notifier_block i40iw_netdevice_notifier = {
	.notifier_call = i40iw_netdevice_event
};
  87. /**
  88. * i40iw_find_i40e_handler - find a handler given a client info
  89. * @ldev: pointer to a client info
  90. */
  91. static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
  92. {
  93. struct i40iw_handler *hdl;
  94. unsigned long flags;
  95. spin_lock_irqsave(&i40iw_handler_lock, flags);
  96. list_for_each_entry(hdl, &i40iw_handlers, list) {
  97. if (hdl->ldev.netdev == ldev->netdev) {
  98. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  99. return hdl;
  100. }
  101. }
  102. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  103. return NULL;
  104. }
  105. /**
  106. * i40iw_find_netdev - find a handler given a netdev
  107. * @netdev: pointer to net_device
  108. */
  109. struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
  110. {
  111. struct i40iw_handler *hdl;
  112. unsigned long flags;
  113. spin_lock_irqsave(&i40iw_handler_lock, flags);
  114. list_for_each_entry(hdl, &i40iw_handlers, list) {
  115. if (hdl->ldev.netdev == netdev) {
  116. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  117. return hdl;
  118. }
  119. }
  120. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  121. return NULL;
  122. }
  123. /**
  124. * i40iw_add_handler - add a handler to the list
  125. * @hdl: handler to be added to the handler list
  126. */
  127. static void i40iw_add_handler(struct i40iw_handler *hdl)
  128. {
  129. unsigned long flags;
  130. spin_lock_irqsave(&i40iw_handler_lock, flags);
  131. list_add(&hdl->list, &i40iw_handlers);
  132. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  133. }
  134. /**
  135. * i40iw_del_handler - delete a handler from the list
  136. * @hdl: handler to be deleted from the handler list
  137. */
  138. static int i40iw_del_handler(struct i40iw_handler *hdl)
  139. {
  140. unsigned long flags;
  141. spin_lock_irqsave(&i40iw_handler_lock, flags);
  142. list_del(&hdl->list);
  143. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  144. return 0;
  145. }
  146. /**
  147. * i40iw_enable_intr - set up device interrupts
  148. * @dev: hardware control device structure
  149. * @msix_id: id of the interrupt to be enabled
  150. */
  151. static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
  152. {
  153. u32 val;
  154. val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
  155. I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
  156. (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
  157. if (dev->is_pf)
  158. i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
  159. else
  160. i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
  161. }
  162. /**
  163. * i40iw_dpc - tasklet for aeq and ceq 0
  164. * @data: iwarp device
  165. */
  166. static void i40iw_dpc(unsigned long data)
  167. {
  168. struct i40iw_device *iwdev = (struct i40iw_device *)data;
  169. if (iwdev->msix_shared)
  170. i40iw_process_ceq(iwdev, iwdev->ceqlist);
  171. i40iw_process_aeq(iwdev);
  172. i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
  173. }
  174. /**
  175. * i40iw_ceq_dpc - dpc handler for CEQ
  176. * @data: data points to CEQ
  177. */
  178. static void i40iw_ceq_dpc(unsigned long data)
  179. {
  180. struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
  181. struct i40iw_device *iwdev = iwceq->iwdev;
  182. i40iw_process_ceq(iwdev, iwceq);
  183. i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
  184. }
  185. /**
  186. * i40iw_irq_handler - interrupt handler for aeq and ceq0
  187. * @irq: Interrupt request number
  188. * @data: iwarp device
  189. */
  190. static irqreturn_t i40iw_irq_handler(int irq, void *data)
  191. {
  192. struct i40iw_device *iwdev = (struct i40iw_device *)data;
  193. tasklet_schedule(&iwdev->dpc_tasklet);
  194. return IRQ_HANDLED;
  195. }
  196. /**
  197. * i40iw_destroy_cqp - destroy control qp
  198. * @iwdev: iwarp device
  199. * @create_done: 1 if cqp create poll was success
  200. *
  201. * Issue destroy cqp request and
  202. * free the resources associated with the cqp
  203. */
  204. static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
  205. {
  206. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  207. struct i40iw_cqp *cqp = &iwdev->cqp;
  208. if (free_hwcqp)
  209. dev->cqp_ops->cqp_destroy(dev->cqp);
  210. i40iw_cleanup_pending_cqp_op(iwdev);
  211. i40iw_free_dma_mem(dev->hw, &cqp->sq);
  212. kfree(cqp->scratch_array);
  213. iwdev->cqp.scratch_array = NULL;
  214. kfree(cqp->cqp_requests);
  215. cqp->cqp_requests = NULL;
  216. }
/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	/* Mask the vector at the device (PF or VF register set) before
	 * tearing down the kernel-side irq resources.
	 */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	/* clear the affinity hint set in i40iw_configure_ceq_vector */
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}
  236. /**
  237. * i40iw_destroy_aeq - destroy aeq
  238. * @iwdev: iwarp device
  239. *
  240. * Issue a destroy aeq request and
  241. * free the resources associated with the aeq
  242. * The function is called during driver unload
  243. */
  244. static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
  245. {
  246. enum i40iw_status_code status = I40IW_ERR_NOT_READY;
  247. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  248. struct i40iw_aeq *aeq = &iwdev->aeq;
  249. if (!iwdev->msix_shared)
  250. i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
  251. if (iwdev->reset)
  252. goto exit;
  253. if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
  254. status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
  255. if (status)
  256. i40iw_pr_err("destroy aeq failed %d\n", status);
  257. exit:
  258. i40iw_free_dma_mem(dev->hw, &aeq->mem);
  259. }
  260. /**
  261. * i40iw_destroy_ceq - destroy ceq
  262. * @iwdev: iwarp device
  263. * @iwceq: ceq to be destroyed
  264. *
  265. * Issue a destroy ceq request and
  266. * free the resources associated with the ceq
  267. */
  268. static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
  269. struct i40iw_ceq *iwceq)
  270. {
  271. enum i40iw_status_code status;
  272. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  273. if (iwdev->reset)
  274. goto exit;
  275. status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
  276. if (status) {
  277. i40iw_pr_err("ceq destroy command failed %d\n", status);
  278. goto exit;
  279. }
  280. status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
  281. if (status)
  282. i40iw_pr_err("ceq destroy completion failed %d\n", status);
  283. exit:
  284. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  285. }
/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	/* With a shared vector, msixtbl entry 0 serves both aeq and ceq 0;
	 * its irq cookie is the device itself (see i40iw_configure_ceq_vector),
	 * so tear that pairing down first.
	 */
	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}
	/* Remaining ceqs start at msixtbl entry 1 (hence the msix_vec++
	 * in the loop init); their irq cookie is the ceq itself.
	 */
	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}

	iwdev->sc_dev.ceq_valid = false;
}
  311. /**
  312. * i40iw_destroy_ccq - destroy control cq
  313. * @iwdev: iwarp device
  314. *
  315. * Issue destroy ccq request and
  316. * free the resources associated with the ccq
  317. */
  318. static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
  319. {
  320. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  321. struct i40iw_ccq *ccq = &iwdev->ccq;
  322. enum i40iw_status_code status = 0;
  323. if (!iwdev->reset)
  324. status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
  325. if (status)
  326. i40iw_pr_err("ccq destroy failed %d\n", status);
  327. i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
  328. }
/* types of hmc objects
 *
 * Order matters: i40iw_create_hmc_objs() creates objects in this order
 * and unwinds a partial failure in reverse.
 */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};
  343. /**
  344. * i40iw_close_hmc_objects_type - delete hmc objects of a given type
  345. * @iwdev: iwarp device
  346. * @obj_type: the hmc object type to be deleted
  347. * @is_pf: true if the function is PF otherwise false
  348. * @reset: true if called before reset
  349. */
  350. static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
  351. enum i40iw_hmc_rsrc_type obj_type,
  352. struct i40iw_hmc_info *hmc_info,
  353. bool is_pf,
  354. bool reset)
  355. {
  356. struct i40iw_hmc_del_obj_info info;
  357. memset(&info, 0, sizeof(info));
  358. info.hmc_info = hmc_info;
  359. info.rsrc_type = obj_type;
  360. info.count = hmc_info->hmc_obj[obj_type].cnt;
  361. info.is_pf = is_pf;
  362. if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
  363. i40iw_pr_err("del obj of type %d failed\n", obj_type);
  364. }
  365. /**
  366. * i40iw_del_hmc_objects - remove all device hmc objects
  367. * @dev: iwarp device
  368. * @hmc_info: hmc_info to free
  369. * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
  370. * by PF on behalf of VF
  371. * @reset: true if called before reset
  372. */
  373. static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
  374. struct i40iw_hmc_info *hmc_info,
  375. bool is_pf,
  376. bool reset)
  377. {
  378. unsigned int i;
  379. for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
  380. i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
  381. }
  382. /**
  383. * i40iw_ceq_handler - interrupt handler for ceq
  384. * @data: ceq pointer
  385. */
  386. static irqreturn_t i40iw_ceq_handler(int irq, void *data)
  387. {
  388. struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
  389. if (iwceq->irq != irq)
  390. i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
  391. tasklet_schedule(&iwceq->dpc_tasklet);
  392. return IRQ_HANDLED;
  393. }
/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 *
 * Thin wrapper over the device's create_hmc_object op; returns its
 * status code directly.
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}
/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	/* create one object per type, in iw_hmc_obj_types[] order */
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		info.add_sd_cnt = 0;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));
	/* unwind the objects created so far, in reverse creation order */
	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}
  449. /**
  450. * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
  451. * @iwdev: iwarp device
  452. * @memptr: points to the memory addresses
  453. * @size: size of memory needed
  454. * @mask: mask for the aligned memory
  455. *
  456. * Get aligned memory of the requested size and
  457. * update the memptr to point to the new aligned memory
  458. * Return 0 if successful, otherwise return no memory error
  459. */
  460. enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
  461. struct i40iw_dma_mem *memptr,
  462. u32 size,
  463. u32 mask)
  464. {
  465. unsigned long va, newva;
  466. unsigned long extra;
  467. va = (unsigned long)iwdev->obj_next.va;
  468. newva = va;
  469. if (mask)
  470. newva = ALIGN(va, (mask + 1));
  471. extra = newva - va;
  472. memptr->va = (u8 *)va + extra;
  473. memptr->pa = iwdev->obj_next.pa + extra;
  474. memptr->size = size;
  475. if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
  476. return I40IW_ERR_NO_MEMORY;
  477. iwdev->obj_next.va = memptr->va + size;
  478. iwdev->obj_next.pa = memptr->pa + size;
  479. return 0;
  480. }
/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

	/* one request tracker and one scratch slot per sq entry */
	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	/* DMA backing for the cqp sq ring */
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	/* carve host context out of the device object memory region */
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}
/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	/* DMA ring for the completion queue entries */
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	/* shadow area comes from the device object memory region */
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	/* on any failure, release the cq ring; shadow area belongs to obj_mem */
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}
  601. /**
  602. * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
  603. * @iwdev: iwarp device
  604. * @msix_vec: interrupt vector information
  605. * @iwceq: ceq associated with the vector
  606. * @ceq_id: the id number of the iwceq
  607. *
  608. * Allocate interrupt resources and enable irq handling
  609. * Return 0 if successful, otherwise return error
  610. */
  611. static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
  612. struct i40iw_ceq *iwceq,
  613. u32 ceq_id,
  614. struct i40iw_msix_vector *msix_vec)
  615. {
  616. enum i40iw_status_code status;
  617. if (iwdev->msix_shared && !ceq_id) {
  618. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  619. status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
  620. } else {
  621. tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
  622. status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
  623. }
  624. cpumask_clear(&msix_vec->mask);
  625. cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
  626. irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
  627. if (status) {
  628. i40iw_pr_err("ceq irq config fail\n");
  629. return I40IW_ERR_CONFIG;
  630. }
  631. msix_vec->ceq_id = ceq_id;
  632. return 0;
  633. }
  634. /**
  635. * i40iw_create_ceq - create completion event queue
  636. * @iwdev: iwarp device
  637. * @iwceq: pointer to the ceq resources to be created
  638. * @ceq_id: the id number of the iwceq
  639. *
  640. * Return 0, if the ceq and the resources associated with it
  641. * are successfully created, otherwise return error
  642. */
  643. static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
  644. struct i40iw_ceq *iwceq,
  645. u32 ceq_id)
  646. {
  647. enum i40iw_status_code status;
  648. struct i40iw_ceq_init_info info;
  649. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  650. u64 scratch;
  651. memset(&info, 0, sizeof(info));
  652. info.ceq_id = ceq_id;
  653. iwceq->iwdev = iwdev;
  654. iwceq->mem.size = sizeof(struct i40iw_ceqe) *
  655. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  656. status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
  657. I40IW_CEQ_ALIGNMENT);
  658. if (status)
  659. goto exit;
  660. info.ceq_id = ceq_id;
  661. info.ceqe_base = iwceq->mem.va;
  662. info.ceqe_pa = iwceq->mem.pa;
  663. info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  664. iwceq->sc_ceq.ceq_id = ceq_id;
  665. info.dev = dev;
  666. scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
  667. status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
  668. if (!status)
  669. status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
  670. exit:
  671. if (status)
  672. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  673. return status;
  674. }
/**
 * i40iw_request_reset - ask the lan driver to reset this client instance
 * @iwdev: iwarp device
 */
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}
/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	/* the lan driver must map queue vectors before any ceq is created */
	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
	/* msix vector 0 is dedicated to the aeq unless vectors are shared,
	 * so the vector index (i) may run one ahead of ceq_id
	 */
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}
		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	/* partial success is tolerated: keep going if at least one ceq came up */
	if (status && !iwdev->ceqs_count) {
		kfree(iwdev->ceqlist);
		iwdev->ceqlist = NULL;
		return status;
	} else {
		iwdev->sc_dev.ceq_valid = true;
		return 0;
	}
}
  742. /**
  743. * i40iw_configure_aeq_vector - set up the msix vector for aeq
  744. * @iwdev: iwarp device
  745. *
  746. * Allocate interrupt resources and enable irq handling
  747. * Return 0 if successful, otherwise return error
  748. */
  749. static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
  750. {
  751. struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
  752. u32 ret = 0;
  753. if (!iwdev->msix_shared) {
  754. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  755. ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
  756. }
  757. if (ret) {
  758. i40iw_pr_err("aeq irq config fail\n");
  759. return I40IW_ERR_CONFIG;
  760. }
  761. return 0;
  762. }
  763. /**
  764. * i40iw_create_aeq - create async event queue
  765. * @iwdev: iwarp device
  766. *
  767. * Return 0, if the aeq and the resources associated with it
  768. * are successfully created, otherwise return error
  769. */
  770. static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
  771. {
  772. enum i40iw_status_code status;
  773. struct i40iw_aeq_init_info info;
  774. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  775. struct i40iw_aeq *aeq = &iwdev->aeq;
  776. u64 scratch = 0;
  777. u32 aeq_size;
  778. aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
  779. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  780. memset(&info, 0, sizeof(info));
  781. aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
  782. status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
  783. I40IW_AEQ_ALIGNMENT);
  784. if (status)
  785. goto exit;
  786. info.aeqe_base = aeq->mem.va;
  787. info.aeq_elem_pa = aeq->mem.pa;
  788. info.elem_cnt = aeq_size;
  789. info.dev = dev;
  790. status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
  791. if (status)
  792. goto exit;
  793. status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
  794. if (!status)
  795. status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
  796. exit:
  797. if (status)
  798. i40iw_free_dma_mem(dev->hw, &aeq->mem);
  799. return status;
  800. }
  801. /**
  802. * i40iw_setup_aeq - set up the device aeq
  803. * @iwdev: iwarp device
  804. *
  805. * Create the aeq and configure its msix interrupt vector
  806. * Return 0 if successful, otherwise return error
  807. */
  808. static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
  809. {
  810. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  811. enum i40iw_status_code status;
  812. status = i40iw_create_aeq(iwdev);
  813. if (status)
  814. return status;
  815. status = i40iw_configure_aeq_vector(iwdev);
  816. if (status) {
  817. i40iw_destroy_aeq(iwdev);
  818. return status;
  819. }
  820. if (!iwdev->msix_shared)
  821. i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
  822. return 0;
  823. }
  824. /**
  825. * i40iw_initialize_ilq - create iwarp local queue for cm
  826. * @iwdev: iwarp device
  827. *
  828. * Return 0 if successful, otherwise return error
  829. */
  830. static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
  831. {
  832. struct i40iw_puda_rsrc_info info;
  833. enum i40iw_status_code status;
  834. memset(&info, 0, sizeof(info));
  835. info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
  836. info.cq_id = 1;
  837. info.qp_id = 0;
  838. info.count = 1;
  839. info.pd_id = 1;
  840. info.sq_size = 8192;
  841. info.rq_size = 8192;
  842. info.buf_size = 1024;
  843. info.tx_buf_cnt = 16384;
  844. info.receive = i40iw_receive_ilq;
  845. info.xmit_complete = i40iw_free_sqbuf;
  846. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  847. if (status)
  848. i40iw_pr_err("ilq create fail\n");
  849. return status;
  850. }
  851. /**
  852. * i40iw_initialize_ieq - create iwarp exception queue
  853. * @iwdev: iwarp device
  854. *
  855. * Return 0 if successful, otherwise return error
  856. */
  857. static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
  858. {
  859. struct i40iw_puda_rsrc_info info;
  860. enum i40iw_status_code status;
  861. memset(&info, 0, sizeof(info));
  862. info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
  863. info.cq_id = 2;
  864. info.qp_id = iwdev->vsi.exception_lan_queue;
  865. info.count = 1;
  866. info.pd_id = 2;
  867. info.sq_size = 8192;
  868. info.rq_size = 8192;
  869. info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
  870. info.tx_buf_cnt = 4096;
  871. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  872. if (status)
  873. i40iw_pr_err("ieq create fail\n");
  874. return status;
  875. }
  876. /**
  877. * i40iw_reinitialize_ieq - destroy and re-create ieq
  878. * @dev: iwarp device
  879. */
  880. void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
  881. {
  882. struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
  883. i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
  884. if (i40iw_initialize_ieq(iwdev)) {
  885. iwdev->reset = true;
  886. i40iw_request_reset(iwdev);
  887. }
  888. }
  889. /**
  890. * i40iw_hmc_setup - create hmc objects for the device
  891. * @iwdev: iwarp device
  892. *
  893. * Set up the device private memory space for the number and size of
  894. * the hmc objects and create the objects
  895. * Return 0 if successful, otherwise return error
  896. */
  897. static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
  898. {
  899. enum i40iw_status_code status;
  900. iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
  901. status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
  902. if (status)
  903. goto exit;
  904. status = i40iw_create_hmc_objs(iwdev, true);
  905. if (status)
  906. goto exit;
  907. iwdev->init_state = HMC_OBJS_CREATED;
  908. exit:
  909. return status;
  910. }
/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 *
 * Free everything allocated during early device init; each pointer is
 * cleared after freeing to guard against double free on re-entry.
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}
  930. /**
  931. * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
  932. * @iwdev: iwarp device
  933. * @idx: the index of the mac ip address to delete
  934. */
  935. static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
  936. {
  937. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  938. struct i40iw_cqp_request *cqp_request;
  939. struct cqp_commands_info *cqp_info;
  940. enum i40iw_status_code status = 0;
  941. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  942. if (!cqp_request) {
  943. i40iw_pr_err("cqp_request memory failed\n");
  944. return;
  945. }
  946. cqp_info = &cqp_request->info;
  947. cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
  948. cqp_info->post_sq = 1;
  949. cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  950. cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  951. cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
  952. cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
  953. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  954. if (status)
  955. i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
  956. }
  957. /**
  958. * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
  959. * @iwdev: iwarp device
  960. * @mac_addr: pointer to mac address
  961. * @idx: the index of the mac ip address to add
  962. */
  963. static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
  964. u8 *mac_addr,
  965. u8 idx)
  966. {
  967. struct i40iw_local_mac_ipaddr_entry_info *info;
  968. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  969. struct i40iw_cqp_request *cqp_request;
  970. struct cqp_commands_info *cqp_info;
  971. enum i40iw_status_code status = 0;
  972. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  973. if (!cqp_request) {
  974. i40iw_pr_err("cqp_request memory failed\n");
  975. return I40IW_ERR_NO_MEMORY;
  976. }
  977. cqp_info = &cqp_request->info;
  978. cqp_info->post_sq = 1;
  979. info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
  980. ether_addr_copy(info->mac_addr, mac_addr);
  981. info->entry_idx = idx;
  982. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  983. cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
  984. cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  985. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  986. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  987. if (status)
  988. i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
  989. return status;
  990. }
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value
	 * (compl_info) to remain valid after the op completes
	 */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		/* hardware returns the allocated table index in op_ret_val */
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}
  1028. /**
  1029. * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
  1030. * @iwdev: iwarp device
  1031. * @macaddr: pointer to mac address
  1032. *
  1033. * Allocate a mac ip address entry and add it to the hw table
  1034. * Return 0 if successful, otherwise return error
  1035. */
  1036. static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
  1037. u8 *macaddr)
  1038. {
  1039. enum i40iw_status_code status;
  1040. status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
  1041. if (!status) {
  1042. status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
  1043. (u8)iwdev->mac_ip_table_idx);
  1044. if (status)
  1045. i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
  1046. }
  1047. return status;
  1048. }
/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 *
 * Walk every UP netdev that either is iwdev's netdev or is a vlan
 * stacked on it, and push each of its ipv6 addresses into the hw
 * arp cache.
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		/* match the device itself or any vlan on top of it */
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				/* NOTE(review): this break aborts the whole scan, so
				 * later matching netdevs are skipped - confirm this is
				 * intended rather than a continue.
				 */
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				/* hw expects the address in host byte order */
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}
  1084. /**
  1085. * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
  1086. * @iwdev: iwarp device
  1087. */
  1088. static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
  1089. {
  1090. struct net_device *dev;
  1091. struct in_device *idev;
  1092. bool got_lock = true;
  1093. u32 ip_addr;
  1094. if (!rtnl_trylock())
  1095. got_lock = false;
  1096. for_each_netdev(&init_net, dev) {
  1097. if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
  1098. (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
  1099. (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
  1100. idev = in_dev_get(dev);
  1101. for_ifa(idev) {
  1102. i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
  1103. "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
  1104. rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
  1105. ip_addr = ntohl(ifa->ifa_address);
  1106. i40iw_manage_arp_cache(iwdev,
  1107. dev->dev_addr,
  1108. &ip_addr,
  1109. true,
  1110. I40IW_ARP_ADD);
  1111. }
  1112. endfor_ifa(idev);
  1113. in_dev_put(idev);
  1114. }
  1115. }
  1116. if (got_lock)
  1117. rtnl_unlock();
  1118. }
  1119. /**
  1120. * i40iw_add_mac_ip - add mac and ip addresses
  1121. * @iwdev: iwarp device
  1122. *
  1123. * Create and add a mac ip address entry to the hw table and
  1124. * ipv4/ipv6 addresses to the arp cache
  1125. * Return 0 if successful, otherwise return error
  1126. */
  1127. static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
  1128. {
  1129. struct net_device *netdev = iwdev->netdev;
  1130. enum i40iw_status_code status;
  1131. status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
  1132. if (status)
  1133. return status;
  1134. i40iw_add_ipv4_addr(iwdev);
  1135. i40iw_add_ipv6_addr(iwdev);
  1136. return 0;
  1137. }
/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 *
 * Poll the firmware load and protocol-engine CPU status registers,
 * logging each reading, until all three CPUs report 0x80 or roughly
 * 14 seconds have elapsed.
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		/* 0x80 appears to be the "ready" value for each PE CPU */
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		msleep(1000);
		retrycount++;
	} while (retrycount < 14);
	/* NOTE(review): undocumented register/value; purpose is not evident
	 * from this file - confirm against the hardware datasheet.
	 */
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}
  1167. /**
  1168. * i40iw_initialize_dev - initialize device
  1169. * @iwdev: iwarp device
  1170. * @ldev: lan device information
  1171. *
  1172. * Allocate memory for the hmc objects and initialize iwdev
  1173. * Return 0 if successful, otherwise clean up the resources
  1174. * and return error
  1175. */
  1176. static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
  1177. struct i40e_info *ldev)
  1178. {
  1179. enum i40iw_status_code status;
  1180. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  1181. struct i40iw_device_init_info info;
  1182. struct i40iw_vsi_init_info vsi_info;
  1183. struct i40iw_dma_mem mem;
  1184. struct i40iw_l2params l2params;
  1185. u32 size;
  1186. struct i40iw_vsi_stats_info stats_info;
  1187. u16 last_qset = I40IW_NO_QSET;
  1188. u16 qset;
  1189. u32 i;
  1190. memset(&l2params, 0, sizeof(l2params));
  1191. memset(&info, 0, sizeof(info));
  1192. size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
  1193. (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
  1194. iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
  1195. if (!iwdev->hmc_info_mem)
  1196. return I40IW_ERR_NO_MEMORY;
  1197. iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
  1198. dev->hmc_info = &iwdev->hw.hmc;
  1199. dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
  1200. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
  1201. I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
  1202. if (status)
  1203. goto error;
  1204. info.fpm_query_buf_pa = mem.pa;
  1205. info.fpm_query_buf = mem.va;
  1206. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
  1207. I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
  1208. if (status)
  1209. goto error;
  1210. info.fpm_commit_buf_pa = mem.pa;
  1211. info.fpm_commit_buf = mem.va;
  1212. info.hmc_fn_id = ldev->fid;
  1213. info.is_pf = (ldev->ftype) ? false : true;
  1214. info.bar0 = ldev->hw_addr;
  1215. info.hw = &iwdev->hw;
  1216. info.debug_mask = debug;
  1217. l2params.mtu =
  1218. (ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
  1219. for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
  1220. qset = ldev->params.qos.prio_qos[i].qs_handle;
  1221. l2params.qs_handle_list[i] = qset;
  1222. if (last_qset == I40IW_NO_QSET)
  1223. last_qset = qset;
  1224. else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
  1225. iwdev->dcb = true;
  1226. }
  1227. i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
  1228. info.vchnl_send = i40iw_virtchnl_send;
  1229. status = i40iw_device_init(&iwdev->sc_dev, &info);
  1230. if (status)
  1231. goto error;
  1232. memset(&vsi_info, 0, sizeof(vsi_info));
  1233. vsi_info.dev = &iwdev->sc_dev;
  1234. vsi_info.back_vsi = (void *)iwdev;
  1235. vsi_info.params = &l2params;
  1236. vsi_info.exception_lan_queue = 1;
  1237. i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
  1238. if (dev->is_pf) {
  1239. memset(&stats_info, 0, sizeof(stats_info));
  1240. stats_info.fcn_id = ldev->fid;
  1241. stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
  1242. if (!stats_info.pestat) {
  1243. status = I40IW_ERR_NO_MEMORY;
  1244. goto error;
  1245. }
  1246. stats_info.stats_initialize = true;
  1247. if (stats_info.pestat)
  1248. i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
  1249. }
  1250. return status;
  1251. error:
  1252. kfree(iwdev->hmc_info_mem);
  1253. iwdev->hmc_info_mem = NULL;
  1254. return status;
  1255. }
/**
 * i40iw_register_notifiers - register tcp ip notifiers
 *
 * Subscribe to inet/inet6 address changes, netevents and netdevice
 * events so the driver can keep its hw tables in sync.
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
	register_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 *
 * Undo every registration made in i40iw_register_notifiers().
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}
  1276. /**
  1277. * i40iw_save_msix_info - copy msix vector information to iwarp device
  1278. * @iwdev: iwarp device
  1279. * @ldev: lan device information
  1280. *
  1281. * Allocate iwdev msix table and copy the ldev msix info to the table
  1282. * Return 0 if successful, otherwise return error
  1283. */
  1284. static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
  1285. struct i40e_info *ldev)
  1286. {
  1287. struct i40e_qvlist_info *iw_qvlist;
  1288. struct i40e_qv_info *iw_qvinfo;
  1289. u32 ceq_idx;
  1290. u32 i;
  1291. u32 size;
  1292. if (!ldev->msix_count) {
  1293. i40iw_pr_err("No MSI-X vectors\n");
  1294. return I40IW_ERR_CONFIG;
  1295. }
  1296. iwdev->msix_count = ldev->msix_count;
  1297. size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
  1298. size += sizeof(struct i40e_qvlist_info);
  1299. size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
  1300. iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
  1301. if (!iwdev->iw_msixtbl)
  1302. return I40IW_ERR_NO_MEMORY;
  1303. iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
  1304. iw_qvlist = iwdev->iw_qvlist;
  1305. iw_qvinfo = iw_qvlist->qv_info;
  1306. iw_qvlist->num_vectors = iwdev->msix_count;
  1307. if (iwdev->msix_count <= num_online_cpus())
  1308. iwdev->msix_shared = true;
  1309. for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
  1310. iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
  1311. iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
  1312. iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
  1313. if (i == 0) {
  1314. iw_qvinfo->aeq_idx = 0;
  1315. if (iwdev->msix_shared)
  1316. iw_qvinfo->ceq_idx = ceq_idx++;
  1317. else
  1318. iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
  1319. } else {
  1320. iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
  1321. iw_qvinfo->ceq_idx = ceq_idx++;
  1322. }
  1323. iw_qvinfo->itr_idx = 3;
  1324. iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
  1325. }
  1326. return 0;
  1327. }
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 *
 * Teardown is driven by init_state: each case undoes one init stage and
 * falls through to the previous one, so a partially initialized device
 * unwinds from exactly where its setup stopped.
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		/* skip hw table cleanup when tearing down after a hw reset */
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}
/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	/* keep a private copy of the lan device info */
	memcpy(&hdl->ldev, ldev, sizeof(*ldev));

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	/* vfs are only supported on non-default resource profiles */
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	/* ftype set means VF; the doorbell offset differs between PF and VF */
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		/* NOTE(review): obj_mem is freed here even on the path where its
		 * allocation failed - presumably i40iw_free_dma_mem tolerates an
		 * unpopulated descriptor; confirm.
		 */
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}
/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations
 *
 * The first clear bit is taken as the count of entries in use; this
 * assumes internal allocations occupy bits contiguously from 0 -
 * TODO confirm against the resource allocator.
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}
  1460. /**
  1461. * i40iw_open - client interface operation open for iwarp/uda device
  1462. * @ldev: lan device information
  1463. * @client: iwarp client information, provided during registration
  1464. *
  1465. * Called by the lan driver during the processing of client register
  1466. * Create device resources, set up queues, pble and hmc objects and
  1467. * register the device with the ib verbs interface
  1468. * Return 0 if successful, otherwise return error
  1469. */
  1470. static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
  1471. {
  1472. struct i40iw_device *iwdev;
  1473. struct i40iw_sc_dev *dev;
  1474. enum i40iw_status_code status;
  1475. struct i40iw_handler *hdl;
  1476. hdl = i40iw_find_netdev(ldev->netdev);
  1477. if (hdl)
  1478. return 0;
  1479. hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
  1480. if (!hdl)
  1481. return -ENOMEM;
  1482. iwdev = &hdl->device;
  1483. iwdev->hdl = hdl;
  1484. dev = &iwdev->sc_dev;
  1485. i40iw_setup_cm_core(iwdev);
  1486. dev->back_dev = (void *)iwdev;
  1487. iwdev->ldev = &hdl->ldev;
  1488. iwdev->client = client;
  1489. mutex_init(&iwdev->pbl_mutex);
  1490. i40iw_add_handler(hdl);
  1491. do {
  1492. status = i40iw_setup_init_state(hdl, ldev, client);
  1493. if (status)
  1494. break;
  1495. iwdev->init_state = INITIAL_STATE;
  1496. if (dev->is_pf)
  1497. i40iw_wait_pe_ready(dev->hw);
  1498. status = i40iw_create_cqp(iwdev);
  1499. if (status)
  1500. break;
  1501. iwdev->init_state = CQP_CREATED;
  1502. status = i40iw_hmc_setup(iwdev);
  1503. if (status)
  1504. break;
  1505. status = i40iw_create_ccq(iwdev);
  1506. if (status)
  1507. break;
  1508. iwdev->init_state = CCQ_CREATED;
  1509. status = i40iw_initialize_ilq(iwdev);
  1510. if (status)
  1511. break;
  1512. iwdev->init_state = ILQ_CREATED;
  1513. status = i40iw_initialize_ieq(iwdev);
  1514. if (status)
  1515. break;
  1516. iwdev->init_state = IEQ_CREATED;
  1517. status = i40iw_setup_aeq(iwdev);
  1518. if (status)
  1519. break;
  1520. iwdev->init_state = AEQ_CREATED;
  1521. status = i40iw_setup_ceqs(iwdev, ldev);
  1522. if (status)
  1523. break;
  1524. iwdev->init_state = CEQ_CREATED;
  1525. status = i40iw_initialize_hw_resources(iwdev);
  1526. if (status)
  1527. break;
  1528. i40iw_get_used_rsrc(iwdev);
  1529. dev->ccq_ops->ccq_arm(dev->ccq);
  1530. status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
  1531. if (status)
  1532. break;
  1533. iwdev->init_state = PBLE_CHUNK_MEM;
  1534. iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
  1535. status = i40iw_add_mac_ip(iwdev);
  1536. if (status)
  1537. break;
  1538. iwdev->init_state = IP_ADDR_REGISTERED;
  1539. if (i40iw_register_rdma_device(iwdev)) {
  1540. i40iw_pr_err("register rdma device fail\n");
  1541. break;
  1542. };
  1543. iwdev->init_state = RDMA_DEV_REGISTERED;
  1544. iwdev->iw_status = 1;
  1545. i40iw_port_ibevent(iwdev);
  1546. iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
  1547. if(iwdev->param_wq == NULL)
  1548. break;
  1549. i40iw_pr_info("i40iw_open completed\n");
  1550. return 0;
  1551. } while (0);
  1552. i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
  1553. i40iw_deinit_device(iwdev);
  1554. return -ERESTART;
  1555. }
  1556. /**
  1557. * i40iw_l2params_worker - worker for l2 params change
  1558. * @work: work pointer for l2 params
  1559. */
  1560. static void i40iw_l2params_worker(struct work_struct *work)
  1561. {
  1562. struct l2params_work *dwork =
  1563. container_of(work, struct l2params_work, work);
  1564. struct i40iw_device *iwdev = dwork->iwdev;
  1565. i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
  1566. atomic_dec(&iwdev->params_busy);
  1567. kfree(work);
  1568. }
  1569. /**
  1570. * i40iw_l2param_change - handle qs handles for qos and mss change
  1571. * @ldev: lan device information
  1572. * @client: client for paramater change
  1573. * @params: new parameters from L2
  1574. */
  1575. static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
  1576. struct i40e_params *params)
  1577. {
  1578. struct i40iw_handler *hdl;
  1579. struct i40iw_l2params *l2params;
  1580. struct l2params_work *work;
  1581. struct i40iw_device *iwdev;
  1582. int i;
  1583. hdl = i40iw_find_i40e_handler(ldev);
  1584. if (!hdl)
  1585. return;
  1586. iwdev = &hdl->device;
  1587. if (atomic_read(&iwdev->params_busy))
  1588. return;
  1589. work = kzalloc(sizeof(*work), GFP_KERNEL);
  1590. if (!work)
  1591. return;
  1592. atomic_inc(&iwdev->params_busy);
  1593. work->iwdev = iwdev;
  1594. l2params = &work->l2params;
  1595. for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
  1596. l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
  1597. l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;
  1598. INIT_WORK(&work->work, i40iw_l2params_worker);
  1599. queue_work(iwdev->param_wq, &work->work);
  1600. }
  1601. /**
  1602. * i40iw_close - client interface operation close for iwarp/uda device
  1603. * @ldev: lan device information
  1604. * @client: client to close
  1605. *
  1606. * Called by the lan driver during the processing of client unregister
  1607. * Destroy and clean up the driver resources
  1608. */
  1609. static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
  1610. {
  1611. struct i40iw_device *iwdev;
  1612. struct i40iw_handler *hdl;
  1613. hdl = i40iw_find_i40e_handler(ldev);
  1614. if (!hdl)
  1615. return;
  1616. iwdev = &hdl->device;
  1617. iwdev->closing = true;
  1618. if (reset)
  1619. iwdev->reset = true;
  1620. i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
  1621. destroy_workqueue(iwdev->virtchnl_wq);
  1622. i40iw_deinit_device(iwdev);
  1623. }
  1624. /**
  1625. * i40iw_vf_reset - process VF reset
  1626. * @ldev: lan device information
  1627. * @client: client interface instance
  1628. * @vf_id: virtual function id
  1629. *
  1630. * Called when a VF is reset by the PF
  1631. * Destroy and clean up the VF resources
  1632. */
  1633. static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
  1634. {
  1635. struct i40iw_handler *hdl;
  1636. struct i40iw_sc_dev *dev;
  1637. struct i40iw_hmc_fcn_info hmc_fcn_info;
  1638. struct i40iw_virt_mem vf_dev_mem;
  1639. struct i40iw_vfdev *tmp_vfdev;
  1640. unsigned int i;
  1641. unsigned long flags;
  1642. struct i40iw_device *iwdev;
  1643. hdl = i40iw_find_i40e_handler(ldev);
  1644. if (!hdl)
  1645. return;
  1646. dev = &hdl->device.sc_dev;
  1647. iwdev = (struct i40iw_device *)dev->back_dev;
  1648. for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
  1649. if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
  1650. continue;
  1651. /* free all resources allocated on behalf of vf */
  1652. tmp_vfdev = dev->vf_dev[i];
  1653. spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
  1654. dev->vf_dev[i] = NULL;
  1655. spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
  1656. i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
  1657. /* remove vf hmc function */
  1658. memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
  1659. hmc_fcn_info.vf_id = vf_id;
  1660. hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
  1661. hmc_fcn_info.free_fcn = true;
  1662. i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
  1663. /* free vf_dev */
  1664. vf_dev_mem.va = tmp_vfdev;
  1665. vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
  1666. sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
  1667. i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
  1668. break;
  1669. }
  1670. }
  1671. /**
  1672. * i40iw_vf_enable - enable a number of VFs
  1673. * @ldev: lan device information
  1674. * @client: client interface instance
  1675. * @num_vfs: number of VFs for the PF
  1676. *
  1677. * Called when the number of VFs changes
  1678. */
  1679. static void i40iw_vf_enable(struct i40e_info *ldev,
  1680. struct i40e_client *client,
  1681. u32 num_vfs)
  1682. {
  1683. struct i40iw_handler *hdl;
  1684. hdl = i40iw_find_i40e_handler(ldev);
  1685. if (!hdl)
  1686. return;
  1687. if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
  1688. hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
  1689. else
  1690. hdl->device.max_enabled_vfs = num_vfs;
  1691. }
  1692. /**
  1693. * i40iw_vf_capable - check if VF capable
  1694. * @ldev: lan device information
  1695. * @client: client interface instance
  1696. * @vf_id: virtual function id
  1697. *
  1698. * Return 1 if a VF slot is available or if VF is already RDMA enabled
  1699. * Return 0 otherwise
  1700. */
  1701. static int i40iw_vf_capable(struct i40e_info *ldev,
  1702. struct i40e_client *client,
  1703. u32 vf_id)
  1704. {
  1705. struct i40iw_handler *hdl;
  1706. struct i40iw_sc_dev *dev;
  1707. unsigned int i;
  1708. hdl = i40iw_find_i40e_handler(ldev);
  1709. if (!hdl)
  1710. return 0;
  1711. dev = &hdl->device.sc_dev;
  1712. for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
  1713. if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
  1714. return 1;
  1715. }
  1716. return 0;
  1717. }
  1718. /**
  1719. * i40iw_virtchnl_receive - receive a message through the virtual channel
  1720. * @ldev: lan device information
  1721. * @client: client interface instance
  1722. * @vf_id: virtual function id associated with the message
  1723. * @msg: message buffer pointer
  1724. * @len: length of the message
  1725. *
  1726. * Invoke virtual channel receive operation for the given msg
  1727. * Return 0 if successful, otherwise return error
  1728. */
  1729. static int i40iw_virtchnl_receive(struct i40e_info *ldev,
  1730. struct i40e_client *client,
  1731. u32 vf_id,
  1732. u8 *msg,
  1733. u16 len)
  1734. {
  1735. struct i40iw_handler *hdl;
  1736. struct i40iw_sc_dev *dev;
  1737. struct i40iw_device *iwdev;
  1738. int ret_code = I40IW_NOT_SUPPORTED;
  1739. if (!len || !msg)
  1740. return I40IW_ERR_PARAM;
  1741. hdl = i40iw_find_i40e_handler(ldev);
  1742. if (!hdl)
  1743. return I40IW_ERR_PARAM;
  1744. dev = &hdl->device.sc_dev;
  1745. iwdev = dev->back_dev;
  1746. if (dev->vchnl_if.vchnl_recv) {
  1747. ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
  1748. if (!dev->is_pf) {
  1749. atomic_dec(&iwdev->vchnl_msgs);
  1750. wake_up(&iwdev->vchnl_waitq);
  1751. }
  1752. }
  1753. return ret_code;
  1754. }
/**
 * i40iw_vf_clear_to_send - wait until clear to send a virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear (no outstanding message and no
 * other waiter) before sending the next message.
 *
 * Returns false if error (channel marked down after timeout)
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

	/* Fast path: nobody else is queued on vf_reqs and no message is
	 * outstanding, so the channel is immediately usable.
	 */
	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	/* Exclusive wait: only one queued sender is woken per wake_up, so
	 * senders proceed one at a time.
	 */
	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	/* If the outstanding message never drains within the timeout, the
	 * channel is considered dead for all subsequent senders.
	 */
	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}
  1781. /**
  1782. * i40iw_virtchnl_send - send a message through the virtual channel
  1783. * @dev: iwarp device
  1784. * @vf_id: virtual function id associated with the message
  1785. * @msg: virtual channel message buffer pointer
  1786. * @len: length of the message
  1787. *
  1788. * Invoke virtual channel send operation for the given msg
  1789. * Return 0 if successful, otherwise return error
  1790. */
  1791. static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
  1792. u32 vf_id,
  1793. u8 *msg,
  1794. u16 len)
  1795. {
  1796. struct i40iw_device *iwdev;
  1797. struct i40e_info *ldev;
  1798. if (!dev || !dev->back_dev)
  1799. return I40IW_ERR_BAD_PTR;
  1800. iwdev = dev->back_dev;
  1801. ldev = iwdev->ldev;
  1802. if (ldev && ldev->ops && ldev->ops->virtchnl_send)
  1803. return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
  1804. return I40IW_ERR_BAD_PTR;
  1805. }
/* client interface functions */
/* Callbacks handed to the i40e LAN driver via i40e_register_client();
 * the LAN driver invokes these on device and VF lifecycle events.
 */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};
  1816. /**
  1817. * i40iw_init_module - driver initialization function
  1818. *
  1819. * First function to call when the driver is loaded
  1820. * Register the driver as i40e client and port mapper client
  1821. */
  1822. static int __init i40iw_init_module(void)
  1823. {
  1824. int ret;
  1825. memset(&i40iw_client, 0, sizeof(i40iw_client));
  1826. i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
  1827. i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
  1828. i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
  1829. i40iw_client.ops = &i40e_ops;
  1830. memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
  1831. i40iw_client.type = I40E_CLIENT_IWARP;
  1832. spin_lock_init(&i40iw_handler_lock);
  1833. ret = i40e_register_client(&i40iw_client);
  1834. i40iw_register_notifiers();
  1835. return ret;
  1836. }
/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	/* Reverse of i40iw_init_module(): drop the notifiers first, then
	 * unregister from the i40e LAN driver (which closes the devices).
	 */
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);