qedi_main.c

/*
 * QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <scsi/iscsi_if.h>
#include <linux/inet.h>
#include <net/arp.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/iscsi_boot_sysfs.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>

#include "qedi.h"
#include "qedi_gbl.h"
#include "qedi_iscsi.h"

static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644);
MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3");

uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
module_param(qedi_dbg_log, uint, 0644);
MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");

uint qedi_io_tracing;
module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
		 " Enable logging of SCSI requests/completions into trace buffer. (default off).");

const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;
static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
static LIST_HEAD(qedi_udev_list);

/* Static function declaration */
static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
static void qedi_free_global_queues(struct qedi_ctx *qedi);
static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
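
/*
 * Firmware event-queue callback registered with the qed core. Each event
 * carries an icid that indexes ep_tbl to find the endpoint: offload and
 * terminate completions flip the endpoint state and wake the waiter on
 * tcp_ofld_wait, while iSCSI- and TCP-level error events are routed to
 * the corresponding error handlers.
 */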
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
	struct qedi_ctx *qedi;
	struct qedi_endpoint *qedi_ep;
	struct iscsi_eqe_data *data;
	int rval = 0;

	if (!context || !fw_handle) {
		QEDI_ERR(NULL, "Recv event with ctx NULL\n");
		return -EINVAL;
	}

	qedi = (struct qedi_ctx *)context;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);

	data = (struct iscsi_eqe_data *)fw_handle;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
		  data->icid, data->conn_id, data->error_code,
		  data->error_pdu_opcode_reserved);

	qedi_ep = qedi->ep_tbl[data->icid];

	if (!qedi_ep) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Cannot process event, ep already disconnected, cid=0x%x\n",
			  data->icid);
		WARN_ON(1);
		return -ENODEV;
	}

	switch (fw_event_code) {
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
		if (qedi_ep->state == EP_STATE_OFLDCONN_START)
			qedi_ep->state = EP_STATE_OFLDCONN_COMPL;

		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
		qedi_ep->state = EP_STATE_DISCONN_COMPL;
		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
		qedi_process_iscsi_error(qedi_ep, data);
		break;
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
		qedi_process_tcp_error(qedi_ep, data);
		break;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
			 fw_event_code);
	}

	return rval;
}
static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct qedi_uio_dev *udev = uinfo->priv;
	struct qedi_ctx *qedi = udev->qedi;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	udev->uio_dev = iminor(inode);
	qedi_reset_uio_rings(udev);
	set_bit(UIO_DEV_OPENED, &qedi->flags);
	rtnl_unlock();

	return 0;
}

static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct qedi_uio_dev *udev = uinfo->priv;
	struct qedi_ctx *qedi = udev->qedi;

	udev->uio_dev = -1;
	clear_bit(UIO_DEV_OPENED, &qedi->flags);
	qedi_ll2_free_skbs(qedi);
	return 0;
}

static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
{
	if (udev->uctrl) {
		free_page((unsigned long)udev->uctrl);
		udev->uctrl = NULL;
	}

	if (udev->ll2_ring) {
		free_page((unsigned long)udev->ll2_ring);
		udev->ll2_ring = NULL;
	}

	if (udev->ll2_buf) {
		free_pages((unsigned long)udev->ll2_buf, 2);
		udev->ll2_buf = NULL;
	}
}

static void __qedi_free_uio(struct qedi_uio_dev *udev)
{
	uio_unregister_device(&udev->qedi_uinfo);

	__qedi_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void qedi_free_uio(struct qedi_uio_dev *udev)
{
	if (!udev)
		return;

	list_del_init(&udev->list);
	__qedi_free_uio(udev);
}

static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
{
	struct qedi_ctx *qedi = NULL;
	struct qedi_uio_ctrl *uctrl = NULL;

	qedi = udev->qedi;
	uctrl = udev->uctrl;

	spin_lock_bh(&qedi->ll2_lock);
	uctrl->host_rx_cons = 0;
	uctrl->hw_rx_prod = 0;
	uctrl->hw_rx_bd_prod = 0;
	uctrl->host_rx_bd_cons = 0;

	memset(udev->ll2_ring, 0, udev->ll2_ring_size);
	memset(udev->ll2_buf, 0, udev->ll2_buf_size);
	spin_unlock_bh(&qedi->ll2_lock);
}

static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
{
	int rc = 0;

	if (udev->ll2_ring || udev->ll2_buf)
		return rc;

	/* Memory for control area. */
	udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
	if (!udev->uctrl)
		return -ENOMEM;

	/* Allocating memory for LL2 ring */
	udev->ll2_ring_size = QEDI_PAGE_SIZE;
	udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
	if (!udev->ll2_ring) {
		rc = -ENOMEM;
		goto exit_alloc_ring;
	}

	/* Allocating memory for Tx/Rx pkt buffer */
	udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
	udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
	udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
						 __GFP_ZERO, 2);
	if (!udev->ll2_buf) {
		rc = -ENOMEM;
		goto exit_alloc_buf;
	}
	return rc;

exit_alloc_buf:
	free_page((unsigned long)udev->ll2_ring);
	udev->ll2_ring = NULL;

exit_alloc_ring:
	return rc;
}
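
/*
 * The pages above are mmap()ed by the iscsiuio daemon, hence the
 * __GFP_COMP whole-page allocations: one page of control data, one page
 * for the LL2 RX BD ring, and an order-2 (four page) block that holds the
 * TX_RX_RING packet buffers of LL2_SINGLE_BUF_SIZE bytes each.
 */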
static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
{
	struct qedi_uio_dev *udev = NULL;
	int rc = 0;

	list_for_each_entry(udev, &qedi_udev_list, list) {
		if (udev->pdev == qedi->pdev) {
			udev->qedi = qedi;
			if (__qedi_alloc_uio_rings(udev)) {
				udev->qedi = NULL;
				return -ENOMEM;
			}
			qedi->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		rc = -ENOMEM;
		goto err_udev;
	}

	udev->uio_dev = -1;

	udev->qedi = qedi;
	udev->pdev = qedi->pdev;

	rc = __qedi_alloc_uio_rings(udev);
	if (rc)
		goto err_uctrl;

	list_add(&udev->list, &qedi_udev_list);

	pci_dev_get(udev->pdev);
	qedi->udev = udev;

	udev->tx_pkt = udev->ll2_buf;
	udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
	return 0;

err_uctrl:
	kfree(udev);
err_udev:
	return -ENOMEM;
}
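
/*
 * Export the three regions to user space through UIO: mem[0] is the
 * qedi_uio_ctrl block holding the shared producer/consumer indices,
 * mem[1] is the LL2 RX BD ring, and mem[2] holds the raw packet buffers.
 * iscsiuio mmaps all three and is told about new packets via
 * uio_event_notify().
 */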
static int qedi_init_uio(struct qedi_ctx *qedi)
{
	struct qedi_uio_dev *udev = qedi->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->qedi_uinfo;

	uinfo->mem[0].addr = (unsigned long)udev->uctrl;
	uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
	uinfo->mem[0].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
	uinfo->mem[1].size = udev->ll2_ring_size;
	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
	uinfo->mem[2].size = udev->ll2_buf_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->name = "qedi_uio";
	uinfo->version = QEDI_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = qedi_uio_open;
	uinfo->release = qedi_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
			if (ret) {
				QEDI_ERR(&qedi->dbg_ctx,
					 "UIO registration failed\n");
			}
		}
	}

	return ret;
}

static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
				  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int ret;

	sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
				     sizeof(struct status_block_e4), &sb_phys,
				     GFP_KERNEL);
	if (!sb_virt) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Status block allocation failed for id = %d.\n",
			 sb_id);
		return -ENOMEM;
	}

	ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
					sb_id, QED_SB_TYPE_STORAGE);
	if (ret) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Status block initialization failed for id = %d.\n",
			 sb_id);
		return ret;
	}

	return 0;
}

static void qedi_free_sb(struct qedi_ctx *qedi)
{
	struct qed_sb_info *sb_info;
	int id;

	for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
		sb_info = &qedi->sb_array[id];
		if (sb_info->sb_virt)
			dma_free_coherent(&qedi->pdev->dev,
					  sizeof(*sb_info->sb_virt),
					  (void *)sb_info->sb_virt,
					  sb_info->sb_phys);
	}
}

static void qedi_free_fp(struct qedi_ctx *qedi)
{
	kfree(qedi->fp_array);
	kfree(qedi->sb_array);
}

static void qedi_destroy_fp(struct qedi_ctx *qedi)
{
	qedi_free_sb(qedi);
	qedi_free_fp(qedi);
}

static int qedi_alloc_fp(struct qedi_ctx *qedi)
{
	int ret = 0;

	qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
				 sizeof(struct qedi_fastpath), GFP_KERNEL);
	if (!qedi->fp_array) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "fastpath fp array allocation failed.\n");
		return -ENOMEM;
	}

	qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
				 sizeof(struct qed_sb_info), GFP_KERNEL);
	if (!qedi->sb_array) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "fastpath sb array allocation failed.\n");
		ret = -ENOMEM;
		goto free_fp;
	}

	return ret;

free_fp:
	qedi_free_fp(qedi);
	return ret;
}
static void qedi_int_fp(struct qedi_ctx *qedi)
{
	struct qedi_fastpath *fp;
	int id;

	memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
	       sizeof(*qedi->fp_array));
	memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
	       sizeof(*qedi->sb_array));

	for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
		fp = &qedi->fp_array[id];
		fp->sb_info = &qedi->sb_array[id];
		fp->sb_id = id;
		fp->qedi = qedi;
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 "qedi", id);

		/* fp_array[i] ---- irq cookie
		 * So init data which is needed in int ctx
		 */
	}
}

static int qedi_prepare_fp(struct qedi_ctx *qedi)
{
	struct qedi_fastpath *fp;
	int id, ret = 0;

	ret = qedi_alloc_fp(qedi);
	if (ret)
		goto err;

	qedi_int_fp(qedi);

	for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
		fp = &qedi->fp_array[id];
		ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
		if (ret) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "SB allocation and initialization failed.\n");
			ret = -EIO;
			goto err_init;
		}
	}

	return 0;

err_init:
	qedi_free_sb(qedi);
	qedi_free_fp(qedi);
err:
	return ret;
}

static int qedi_setup_cid_que(struct qedi_ctx *qedi)
{
	int i;

	qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
						   sizeof(u32), GFP_KERNEL);
	if (!qedi->cid_que.cid_que_base)
		return -ENOMEM;

	qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
						   sizeof(struct qedi_conn *),
						   GFP_KERNEL);
	if (!qedi->cid_que.conn_cid_tbl) {
		kfree(qedi->cid_que.cid_que_base);
		qedi->cid_que.cid_que_base = NULL;
		return -ENOMEM;
	}

	qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
	qedi->cid_que.cid_q_prod_idx = 0;
	qedi->cid_que.cid_q_cons_idx = 0;
	qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
	qedi->cid_que.cid_free_cnt = qedi->max_active_conns;

	for (i = 0; i < qedi->max_active_conns; i++) {
		qedi->cid_que.cid_que[i] = i;
		qedi->cid_que.conn_cid_tbl[i] = NULL;
	}

	return 0;
}

static void qedi_release_cid_que(struct qedi_ctx *qedi)
{
	kfree(qedi->cid_que.cid_que_base);
	qedi->cid_que.cid_que_base = NULL;

	kfree(qedi->cid_que.conn_cid_tbl);
	qedi->cid_que.conn_cid_tbl = NULL;
}

static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
			    u16 start_id, u16 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);

	return ret;
}
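
/*
 * Round-robin bitmap allocator for local TCP port ids: the search starts
 * at ->next and wraps to the start of the bitmap at most once, so recently
 * freed ids are not immediately reused. Note that the "(id + 1) &
 * (id_tbl->max - 1)" wrap of ->next only works if the table size
 * (QEDI_LOCAL_PORT_RANGE here) is a power of two.
 */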
u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
{
	u16 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = QEDI_LOCAL_PORT_INVALID;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = QEDI_LOCAL_PORT_INVALID;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
{
	if (id == QEDI_LOCAL_PORT_INVALID)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void qedi_cm_free_mem(struct qedi_ctx *qedi)
{
	kfree(qedi->ep_tbl);
	qedi->ep_tbl = NULL;
	qedi_free_id_tbl(&qedi->lcl_port_tbl);
}

static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
{
	u16 port_id;

	qedi->ep_tbl = kzalloc((qedi->max_active_conns *
				sizeof(struct qedi_endpoint *)), GFP_KERNEL);
	if (!qedi->ep_tbl)
		return -ENOMEM;
	port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
	if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
			     QEDI_LOCAL_PORT_MIN, port_id)) {
		qedi_cm_free_mem(qedi);
		return -ENOMEM;
	}

	return 0;
}

static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct qedi_ctx *qedi = NULL;

	shost = iscsi_host_alloc(&qedi_host_template,
				 sizeof(struct qedi_ctx), 0);
	if (!shost) {
		QEDI_ERR(NULL, "Could not allocate shost\n");
		goto exit_setup_shost;
	}

	shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = ~0;
	shost->max_cmd_len = 16;
	shost->transportt = qedi_scsi_transport;

	qedi = iscsi_host_priv(shost);
	memset(qedi, 0, sizeof(*qedi));
	qedi->shost = shost;
	qedi->dbg_ctx.host_no = shost->host_no;
	qedi->pdev = pdev;
	qedi->dbg_ctx.pdev = pdev;
	qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
	qedi->max_sqes = QEDI_SQ_SIZE;

	if (shost_use_blk_mq(shost))
		shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);

	pci_set_drvdata(pdev, qedi);

exit_setup_shost:
	return qedi;
}

static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
	struct qedi_uio_dev *udev;
	struct qedi_uio_ctrl *uctrl;
	struct skb_work_list *work;
	u32 prod;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
			  "UIO DEV is not opened\n");
		kfree_skb(skb);
		return 0;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate work so dropping frame.\n");
		kfree_skb(skb);
		return 0;
	}

	INIT_LIST_HEAD(&work->list);
	work->skb = skb;

	if (skb_vlan_tag_present(skb))
		work->vlan_id = skb_vlan_tag_get(skb);

	if (work->vlan_id)
		__vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);

	spin_lock_bh(&qedi->ll2_lock);
	list_add_tail(&work->list, &qedi->ll2_skb_list);

	++uctrl->hw_rx_prod_cnt;
	prod = (uctrl->hw_rx_prod + 1) % RX_RING;
	if (prod != uctrl->host_rx_cons) {
		uctrl->hw_rx_prod = prod;
		spin_unlock_bh(&qedi->ll2_lock);
		wake_up_process(qedi->ll2_recv_thread);
		return 0;
	}

	spin_unlock_bh(&qedi->ll2_lock);
	return 0;
}
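
/*
 * qedi_ll2_rx() runs in the LL2 completion path: it queues the skb on
 * ll2_skb_list and advances the shared hw_rx_prod index only while the
 * ring is not full (producer + 1 catching up to user space's host_rx_cons
 * means full), then kicks the receive kthread, which copies the frame
 * into the mmaped buffers below.
 */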
/* map this skb to iscsiuio mmaped region */
static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
				u16 vlan_id)
{
	struct qedi_uio_dev *udev = NULL;
	struct qedi_uio_ctrl *uctrl = NULL;
	struct qedi_rx_bd rxbd;
	struct qedi_rx_bd *p_rxbd;
	u32 rx_bd_prod;
	void *pkt;
	int len = 0;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;
	pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
	len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
	memcpy(pkt, skb->data, len);

	memset(&rxbd, 0, sizeof(rxbd));
	rxbd.rx_pkt_index = uctrl->hw_rx_prod;
	rxbd.rx_pkt_len = len;
	rxbd.vlan_id = vlan_id;

	uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
	rx_bd_prod = uctrl->hw_rx_bd_prod;
	p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
	p_rxbd += rx_bd_prod;

	memcpy(p_rxbd, &rxbd, sizeof(rxbd));

	/* notify the iscsiuio about new packet */
	uio_event_notify(&udev->qedi_uinfo);

	return 0;
}

static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
{
	struct skb_work_list *work, *work_tmp;

	spin_lock_bh(&qedi->ll2_lock);
	list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
		list_del(&work->list);
		if (work->skb)
			kfree_skb(work->skb);
		kfree(work);
	}
	spin_unlock_bh(&qedi->ll2_lock);
}
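
/*
 * Receive kthread that drains ll2_skb_list into the UIO buffers. Note it
 * sets TASK_INTERRUPTIBLE while still holding ll2_lock, before calling
 * schedule(), so a wake_up_process() from qedi_ll2_rx() between the state
 * change and schedule() cannot be lost.
 */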
static int qedi_ll2_recv_thread(void *arg)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
	struct skb_work_list *work, *work_tmp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		spin_lock_bh(&qedi->ll2_lock);
		list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
					 list) {
			list_del(&work->list);
			qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
			kfree_skb(work->skb);
			kfree(work);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&qedi->ll2_lock);
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return 0;
}
static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
{
	u8 num_sq_pages;
	u32 log_page_size;
	int rval = 0;

	num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;

	qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Number of CQ count is %d\n", qedi->num_queues);

	memset(&qedi->pf_params.iscsi_pf_params, 0,
	       sizeof(qedi->pf_params.iscsi_pf_params));

	qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
			qedi->num_queues * sizeof(struct qedi_glbl_q_params),
			&qedi->hw_p_cpuq);
	if (!qedi->p_cpuq) {
		QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
		rval = -1;
		goto err_alloc_mem;
	}

	rval = qedi_alloc_global_queues(qedi);
	if (rval) {
		QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
		rval = -1;
		goto err_alloc_mem;
	}

	qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
	qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
	qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
	qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
	qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
	qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;

	for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
		if ((1 << log_page_size) == PAGE_SIZE)
			break;
	}
	qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;

	qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
							   (u64)qedi->hw_p_cpuq;

	/* RQ BDQ initializations.
	 * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
	 * rqe_log_size: 8 for 256B RQE
	 */
	qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
	/* BDQ address and size */
	qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
							qedi->bdq_pbl_list_dma;
	qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
						qedi->bdq_pbl_list_num_entries;
	qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;

	/* cq_num_entries: num_tasks + rq_num_entries */
	qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;

	qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
	qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;

err_alloc_mem:
	return rval;
}
/* Free DMA coherent memory for array of queue pointers we pass to qed */
static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
{
	size_t size = 0;

	if (qedi->p_cpuq) {
		size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
		pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
				    qedi->hw_p_cpuq);
	}

	qedi_free_global_queues(qedi);

	kfree(qedi->global_queues);
}

static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
				   struct qedi_boot_target *tgt, u8 index)
{
	u32 ipv6_en;

	ipv6_en = !!(block->generic.ctrl_flags &
		     NVM_ISCSI_CFG_GEN_IPV6_ENABLED);

	snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
		 block->target[index].target_name.byte);

	tgt->ipv6_en = ipv6_en;

	if (ipv6_en)
		snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n",
			 block->target[index].ipv6_addr.byte);
	else
		snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n",
			 block->target[index].ipv4_addr.byte);
}

static int qedi_find_boot_info(struct qedi_ctx *qedi,
			       struct qed_mfw_tlv_iscsi *iscsi,
			       struct nvm_iscsi_block *block)
{
	struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL;
	u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0;
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	struct qedi_conn *qedi_conn;
	struct iscsi_session *sess;
	struct iscsi_conn *conn;
	char ep_ip_addr[64];
	int i, ret = 0;

	pri_ctrl_flags = !!(block->target[0].ctrl_flags &
					NVM_ISCSI_CFG_TARGET_ENABLED);
	if (pri_ctrl_flags) {
		pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL);
		if (!pri_tgt)
			return -1;
		qedi_get_boot_tgt_info(block, pri_tgt, 0);
	}

	sec_ctrl_flags = !!(block->target[1].ctrl_flags &
					NVM_ISCSI_CFG_TARGET_ENABLED);
	if (sec_ctrl_flags) {
		sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL);
		if (!sec_tgt) {
			ret = -1;
			goto free_tgt;
		}
		qedi_get_boot_tgt_info(block, sec_tgt, 1);
	}

	for (i = 0; i < qedi->max_active_conns; i++) {
		qedi_conn = qedi_get_conn_from_id(qedi, i);
		if (!qedi_conn)
			continue;

		if (qedi_conn->ep->ip_type == TCP_IPV4)
			snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n",
				 qedi_conn->ep->dst_addr);
		else
			snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n",
				 qedi_conn->ep->dst_addr);

		cls_conn = qedi_conn->cls_conn;
		conn = cls_conn->dd_data;
		cls_sess = iscsi_conn_to_session(cls_conn);
		sess = cls_sess->dd_data;

		if (!iscsi_is_session_online(cls_sess))
			continue;

		if (!sess->targetname)
			continue;

		if (pri_ctrl_flags) {
			if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
			    !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
				found = 1;
				break;
			}
		}

		if (sec_ctrl_flags) {
			if (!strcmp(sec_tgt->iscsi_name, sess->targetname) &&
			    !strcmp(sec_tgt->ip_addr, ep_ip_addr)) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		if (conn->hdrdgst_en) {
			iscsi->header_digest_set = true;
			iscsi->header_digest = 1;
		}

		if (conn->datadgst_en) {
			iscsi->data_digest_set = true;
			iscsi->data_digest = 1;
		}
		iscsi->boot_taget_portal_set = true;
		iscsi->boot_taget_portal = sess->tpgt;
	} else {
		ret = -1;
	}

	if (sec_ctrl_flags)
		kfree(sec_tgt);
free_tgt:
	if (pri_ctrl_flags)
		kfree(pri_tgt);

	return ret;
}
static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qedi_ctx *qedi;

	if (!dev) {
		QEDI_INFO(NULL, QEDI_LOG_EVT,
			  "dev is NULL so ignoring get_generic_tlv_data request.\n");
		return;
	}
	qedi = (struct qedi_ctx *)dev;

	memset(data, 0, sizeof(struct qed_generic_tlvs));
	ether_addr_copy(data->mac[0], qedi->mac);
}

/*
 * Protocol TLV handler
 */
static void qedi_get_protocol_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_iscsi *iscsi = data;
	struct qed_iscsi_stats *fw_iscsi_stats;
	struct nvm_iscsi_block *block = NULL;
	u32 chap_en = 0, mchap_en = 0;
	struct qedi_ctx *qedi = dev;
	int rval = 0;

	fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL);
	if (!fw_iscsi_stats) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Could not allocate memory for fw_iscsi_stats.\n");
		goto exit_get_data;
	}

	mutex_lock(&qedi->stats_lock);
	/* Query firmware for offload stats */
	qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats);
	mutex_unlock(&qedi->stats_lock);

	iscsi->rx_frames_set = true;
	iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt;
	iscsi->rx_bytes_set = true;
	iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt;
	iscsi->tx_frames_set = true;
	iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt;
	iscsi->tx_bytes_set = true;
	iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt;
	iscsi->frame_size_set = true;
	iscsi->frame_size = qedi->ll2_mtu;
	block = qedi_get_nvram_block(qedi);
	if (block) {
		chap_en = !!(block->generic.ctrl_flags &
			     NVM_ISCSI_CFG_GEN_CHAP_ENABLED);
		mchap_en = !!(block->generic.ctrl_flags &
			      NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED);

		iscsi->auth_method_set = (chap_en || mchap_en) ? true : false;
		iscsi->auth_method = 1;
		if (chap_en)
			iscsi->auth_method = 2;
		if (mchap_en)
			iscsi->auth_method = 3;

		iscsi->tx_desc_size_set = true;
		iscsi->tx_desc_size = QEDI_SQ_SIZE;
		iscsi->rx_desc_size_set = true;
		iscsi->rx_desc_size = QEDI_CQ_SIZE;

		/* tpgt, hdr digest, data digest */
		rval = qedi_find_boot_info(qedi, iscsi, block);
		if (rval)
			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
				  "Boot target not set");
	}

	kfree(fw_iscsi_stats);
exit_get_data:
	return;
}

static void qedi_link_update(void *dev, struct qed_link_output *link)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)dev;

	if (link->link_up) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
		atomic_set(&qedi->link_state, QEDI_LINK_UP);
	} else {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Link Down event.\n");
		atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
	}
}

static struct qed_iscsi_cb_ops qedi_cb_ops = {
	{
		.link_update = qedi_link_update,
		.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
		.get_generic_tlv_data = qedi_get_generic_tlv_data,
	}
};
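
/*
 * CQE dispatch: solicited completions (responses to commands we issued)
 * reuse the qedi_cmd's embedded cqe_work, so no allocation is needed on
 * that path; unsolicited, dummy and task-cleanup CQEs get a freshly
 * allocated qedi_work instead. Either way the work lands on the per-CPU
 * work_list and is consumed by the per-CPU I/O thread.
 */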
static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
			  u16 que_idx, struct qedi_percpu_s *p)
{
	struct qedi_work *qedi_work;
	struct qedi_conn *q_conn;
	struct iscsi_conn *conn;
	struct qedi_cmd *qedi_cmd;
	u32 iscsi_cid;
	int rc = 0;

	iscsi_cid = cqe->cqe_common.conn_id;
	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!q_conn) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Session no longer exists for cid=0x%x!!\n",
			  iscsi_cid);
		return -1;
	}
	conn = q_conn->cls_conn->dd_data;

	switch (cqe->cqe_common.cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
		if (!qedi_cmd) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
		qedi_cmd->cqe_work.qedi = qedi;
		memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
		qedi_cmd->cqe_work.que_idx = que_idx;
		qedi_cmd->cqe_work.is_solicited = true;
		list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
	case ISCSI_CQE_TYPE_DUMMY:
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
		if (!qedi_work) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_work->list);
		qedi_work->qedi = qedi;
		memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
		qedi_work->que_idx = que_idx;
		qedi_work->is_solicited = false;
		list_add_tail(&qedi_work->list, &p->work_list);
		break;
	default:
		rc = -1;
		QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
	}
	return rc;
}
static bool qedi_process_completions(struct qedi_fastpath *fp)
{
	struct qedi_ctx *qedi = fp->qedi;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block_e4 *sb = sb_info->sb_virt;
	struct qedi_percpu_s *p = NULL;
	struct global_queue *que;
	u16 prod_idx;
	unsigned long flags;
	union iscsi_cqe *cqe;
	int cpu;
	int ret;

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
	if (prod_idx >= QEDI_CQ_SIZE)
		prod_idx = prod_idx % QEDI_CQ_SIZE;

	que = qedi->global_queues[fp->sb_id];
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
		  "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
		  que, prod_idx, que->cq_cons_idx, fp->sb_id);

	qedi->intr_cpu = fp->sb_id;
	cpu = smp_processor_id();
	p = &per_cpu(qedi_percpu, cpu);

	if (unlikely(!p->iothread))
		WARN_ON(1);

	spin_lock_irqsave(&p->p_work_lock, flags);
	while (que->cq_cons_idx != prod_idx) {
		cqe = &que->cq[que->cq_cons_idx];
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
			  "cqe=%p prod_idx=%d cons_idx=%d.\n",
			  cqe, prod_idx, que->cq_cons_idx);

		ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
		if (ret)
			QEDI_WARN(&qedi->dbg_ctx,
				  "Dropping CQE 0x%x for cid=0x%x.\n",
				  que->cq_cons_idx, cqe->cqe_common.conn_id);

		que->cq_cons_idx++;
		if (que->cq_cons_idx == QEDI_CQ_SIZE)
			que->cq_cons_idx = 0;
	}
	wake_up_process(p->iothread);
	spin_unlock_irqrestore(&p->p_work_lock, flags);

	return true;
}

static bool qedi_fp_has_work(struct qedi_fastpath *fp)
{
	struct qedi_ctx *qedi = fp->qedi;
	struct global_queue *que;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block_e4 *sb = sb_info->sb_virt;
	u16 prod_idx;

	barrier();

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];

	/* Get the pointer to the global CQ this completion is on */
	que = qedi->global_queues[fp->sb_id];

	/* prod idx wrap around uint16 */
	if (prod_idx >= QEDI_CQ_SIZE)
		prod_idx = prod_idx % QEDI_CQ_SIZE;

	return (que->cq_cons_idx != prod_idx);
}
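
/*
 * MSI-X interrupt flow: ack the IGU with interrupts disabled, drain the
 * completion queue, write back the consumer index via
 * qed_sb_update_sb_idx(), then re-check for work that raced in before
 * re-enabling the interrupt; if more CQEs arrived, loop instead of
 * re-arming so no completion is left stranded.
 */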
/* MSI-X fastpath handler code */
static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
{
	struct qedi_fastpath *fp = dev_id;
	struct qedi_ctx *qedi = fp->qedi;
	bool wake_io_thread = true;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

process_again:
	wake_io_thread = qedi_process_completions(fp);
	if (wake_io_thread) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
			  "process already running\n");
	}

	if (qedi_fp_has_work(fp) == 0)
		qed_sb_update_sb_idx(fp->sb_info);

	/* Check for more work */
	rmb();

	if (qedi_fp_has_work(fp) == 0)
		qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
	else
		goto process_again;

	return IRQ_HANDLED;
}

/* simd handler for MSI/INTa */
static void qedi_simd_int_handler(void *cookie)
{
	/* Cookie is qedi_ctx struct */
	struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;

	QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
}

#define QEDI_SIMD_HANDLER_NUM		0
static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
{
	int i;

	if (qedi->int_info.msix_cnt) {
		for (i = 0; i < qedi->int_info.used_cnt; i++) {
			synchronize_irq(qedi->int_info.msix[i].vector);
			irq_set_affinity_hint(qedi->int_info.msix[i].vector,
					      NULL);
			free_irq(qedi->int_info.msix[i].vector,
				 &qedi->fp_array[i]);
		}
	} else {
		qedi_ops->common->simd_handler_clean(qedi->cdev,
						     QEDI_SIMD_HANDLER_NUM);
	}

	qedi->int_info.used_cnt = 0;
	qedi_ops->common->set_fp_int(qedi->cdev, 0);
}

static int qedi_request_msix_irq(struct qedi_ctx *qedi)
{
	int i, rc, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
		rc = request_irq(qedi->int_info.msix[i].vector,
				 qedi_msix_handler, 0, "qedi",
				 &qedi->fp_array[i]);
		if (rc) {
			QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
			qedi_sync_free_irqs(qedi);
			return rc;
		}
		qedi->int_info.used_cnt++;
		rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
					   get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	return 0;
}

static int qedi_setup_int(struct qedi_ctx *qedi)
{
	int rc = 0;

	rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
	rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
	if (rc)
		goto exit_setup_int;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
		  qedi->int_info.msix_cnt, num_online_cpus());

	if (qedi->int_info.msix_cnt) {
		rc = qedi_request_msix_irq(qedi);
		goto exit_setup_int;
	} else {
		qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
						      QEDI_SIMD_HANDLER_NUM,
						      qedi_simd_int_handler);
		qedi->int_info.used_cnt = 1;
	}

exit_setup_int:
	return rc;
}
static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
	if (qedi->iscsi_image)
		dma_free_coherent(&qedi->pdev->dev,
				  sizeof(struct qedi_nvm_iscsi_image),
				  qedi->iscsi_image, qedi->nvm_buf_dma);
}

static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
	struct qedi_nvm_iscsi_image nvm_image;

	qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
						sizeof(nvm_image),
						&qedi->nvm_buf_dma,
						GFP_KERNEL);
	if (!qedi->iscsi_image) {
		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
		return -ENOMEM;
	}
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
		  qedi->nvm_buf_dma);

	return 0;
}

static void qedi_free_bdq(struct qedi_ctx *qedi)
{
	int i;

	if (qedi->bdq_pbl_list)
		dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
				  qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);

	if (qedi->bdq_pbl)
		dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
				  qedi->bdq_pbl, qedi->bdq_pbl_dma);

	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		if (qedi->bdq[i].buf_addr) {
			dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
					  qedi->bdq[i].buf_addr,
					  qedi->bdq[i].buf_dma);
		}
	}
}

static void qedi_free_global_queues(struct qedi_ctx *qedi)
{
	int i;
	struct global_queue **gl = qedi->global_queues;

	for (i = 0; i < qedi->num_queues; i++) {
		if (!gl[i])
			continue;

		if (gl[i]->cq)
			dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
					  gl[i]->cq, gl[i]->cq_dma);
		if (gl[i]->cq_pbl)
			dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
					  gl[i]->cq_pbl, gl[i]->cq_pbl_dma);

		kfree(gl[i]);
	}
	qedi_free_bdq(qedi);
	qedi_free_nvm_iscsi_cfg(qedi);
}
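
/*
 * The BDQ (buffer descriptor queue) is a pool of QEDI_BDQ_NUM receive
 * buffers handed to the firmware for unsolicited data. Two levels of
 * indirection are built below: a PBL whose scsi_bd entries carry each
 * buffer's DMA address plus an opaque index, and a single page listing
 * the PBL's page address(es), which is what qedi_set_iscsi_pf_param()
 * passes to qed through bdq_pbl_base_addr[BDQ_ID_RQ].
 */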
static int qedi_alloc_bdq(struct qedi_ctx *qedi)
{
	int i;
	struct scsi_bd *pbl;
	u64 *list;
	dma_addr_t page;

	/* Alloc dma memory for BDQ buffers */
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		qedi->bdq[i].buf_addr =
				dma_alloc_coherent(&qedi->pdev->dev,
						   QEDI_BDQ_BUF_SIZE,
						   &qedi->bdq[i].buf_dma,
						   GFP_KERNEL);
		if (!qedi->bdq[i].buf_addr) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not allocate BDQ buffer %d.\n", i);
			return -ENOMEM;
		}
	}

	/* Alloc dma memory for BDQ page buffer list */
	qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
	qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
	qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
		  qedi->rq_num_entries);

	qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
					   qedi->bdq_pbl_mem_size,
					   &qedi->bdq_pbl_dma, GFP_KERNEL);
	if (!qedi->bdq_pbl) {
		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
		return -ENOMEM;
	}

	/*
	 * Populate BDQ PBL with physical and virtual address of individual
	 * BDQ buffers
	 */
	pbl = (struct scsi_bd *)qedi->bdq_pbl;
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		pbl->address.hi =
				cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
		pbl->address.lo =
				cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
			  pbl, pbl->address.hi, pbl->address.lo, i);
		pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
		pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
		pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
		pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
		pbl++;
	}

	/* Allocate list of PBL pages */
	qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
						 &qedi->bdq_pbl_list_dma,
						 GFP_KERNEL);
	if (!qedi->bdq_pbl_list) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Could not allocate list of PBL pages.\n");
		return -ENOMEM;
	}

	/*
	 * Now populate PBL list with pages that contain pointers to the
	 * individual buffers.
	 */
	qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
	list = (u64 *)qedi->bdq_pbl_list;
	page = qedi->bdq_pbl_list_dma;
	for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
		*list = qedi->bdq_pbl_dma;
		list++;
		page += PAGE_SIZE;
	}

	return 0;
}
static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
{
        u32 *list;
        int i;
        int status = 0, rc;
        u32 *pbl;
        dma_addr_t page;
        int num_pages;

        /*
         * Number of global queues (CQ / RQ). This should
         * be <= number of available MSI-X vectors for the PF
         */
        if (!qedi->num_queues) {
                QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
                return 1;
        }

        /* Make sure we allocated the PBL that will contain the physical
         * addresses of our queues
         */
        if (!qedi->p_cpuq) {
                status = 1;
                goto mem_alloc_failure;
        }

        qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
                                       qedi->num_queues), GFP_KERNEL);
        if (!qedi->global_queues) {
                QEDI_ERR(&qedi->dbg_ctx,
                         "Unable to allocate global queues array ptr memory\n");
                return -ENOMEM;
        }
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "qedi->global_queues=%p.\n", qedi->global_queues);

        /* Allocate DMA coherent buffers for BDQ */
        rc = qedi_alloc_bdq(qedi);
        if (rc)
                goto mem_alloc_failure;

        /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
        rc = qedi_alloc_nvm_iscsi_cfg(qedi);
        if (rc)
                goto mem_alloc_failure;

        /* Allocate a CQ and an associated PBL for each MSI-X
         * vector.
         */
        for (i = 0; i < qedi->num_queues; i++) {
                qedi->global_queues[i] =
                        kzalloc(sizeof(*qedi->global_queues[0]),
                                GFP_KERNEL);
                if (!qedi->global_queues[i]) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Unable to allocate global queue %d.\n", i);
                        status = -ENOMEM;
                        goto mem_alloc_failure;
                }

                qedi->global_queues[i]->cq_mem_size =
                        (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
                qedi->global_queues[i]->cq_mem_size =
                        (qedi->global_queues[i]->cq_mem_size +
                        (QEDI_PAGE_SIZE - 1));

                qedi->global_queues[i]->cq_pbl_size =
                        (qedi->global_queues[i]->cq_mem_size /
                        QEDI_PAGE_SIZE) * sizeof(void *);
                qedi->global_queues[i]->cq_pbl_size =
                        (qedi->global_queues[i]->cq_pbl_size +
                        (QEDI_PAGE_SIZE - 1));

                qedi->global_queues[i]->cq =
                        dma_zalloc_coherent(&qedi->pdev->dev,
                                            qedi->global_queues[i]->cq_mem_size,
                                            &qedi->global_queues[i]->cq_dma,
                                            GFP_KERNEL);
                if (!qedi->global_queues[i]->cq) {
                        QEDI_WARN(&qedi->dbg_ctx,
                                  "Could not allocate cq.\n");
                        status = -ENOMEM;
                        goto mem_alloc_failure;
                }

                qedi->global_queues[i]->cq_pbl =
                        dma_zalloc_coherent(&qedi->pdev->dev,
                                            qedi->global_queues[i]->cq_pbl_size,
                                            &qedi->global_queues[i]->cq_pbl_dma,
                                            GFP_KERNEL);
                if (!qedi->global_queues[i]->cq_pbl) {
                        QEDI_WARN(&qedi->dbg_ctx,
                                  "Could not allocate cq PBL.\n");
                        status = -ENOMEM;
                        goto mem_alloc_failure;
                }

                /* Create PBL */
                num_pages = qedi->global_queues[i]->cq_mem_size /
                            QEDI_PAGE_SIZE;
                page = qedi->global_queues[i]->cq_dma;
                pbl = (u32 *)qedi->global_queues[i]->cq_pbl;

                while (num_pages--) {
                        *pbl = (u32)page;
                        pbl++;
                        *pbl = (u32)((u64)page >> 32);
                        pbl++;
                        page += QEDI_PAGE_SIZE;
                }
        }

        list = (u32 *)qedi->p_cpuq;

        /*
         * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
         * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
         * to the physical address which contains an array of pointers to the
         * physical addresses of the specific queue pages.
         */
        for (i = 0; i < qedi->num_queues; i++) {
                *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
                list++;
                *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
                list++;

                *list = (u32)0;
                list++;
                *list = (u32)((u64)0 >> 32);
                list++;
        }

        return 0;

mem_alloc_failure:
        qedi_free_global_queues(qedi);
        return status;
}
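
/*
 * qedi_alloc_sq - per-endpoint send queue allocation.  This mirrors the CQ
 * allocation above: a DMA-coherent queue buffer plus a PBL of
 * {low u32, high u32} page addresses that the firmware walks to locate the
 * queue pages.
 */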
int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
        int rval = 0;
        u32 *pbl;
        dma_addr_t page;
        int num_pages;

        if (!ep)
                return -EIO;

        /* Calculate appropriate queue and PBL sizes */
        ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
        ep->sq_mem_size += QEDI_PAGE_SIZE - 1;

        ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
        ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;

        ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
                                     &ep->sq_dma, GFP_KERNEL);
        if (!ep->sq) {
                QEDI_WARN(&qedi->dbg_ctx,
                          "Could not allocate send queue.\n");
                rval = -ENOMEM;
                goto out;
        }
        ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
                                         &ep->sq_pbl_dma, GFP_KERNEL);
        if (!ep->sq_pbl) {
                QEDI_WARN(&qedi->dbg_ctx,
                          "Could not allocate send queue PBL.\n");
                rval = -ENOMEM;
                goto out_free_sq;
        }

        /* Create PBL */
        num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
        page = ep->sq_dma;
        pbl = (u32 *)ep->sq_pbl;

        while (num_pages--) {
                *pbl = (u32)page;
                pbl++;
                *pbl = (u32)((u64)page >> 32);
                pbl++;
                page += QEDI_PAGE_SIZE;
        }

        return rval;

out_free_sq:
        dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
                          ep->sq_dma);
out:
        return rval;
}

void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
        if (ep->sq_pbl)
                dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
                                  ep->sq_pbl_dma);
        if (ep->sq)
                dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
                                  ep->sq_dma);
}
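
/*
 * qedi_get_task_idx - grab a free firmware task-context index from the
 * task_idx_map bitmap.  find_first_zero_bit() and test_and_set_bit() are
 * not atomic as a pair, so a lost race simply retries via the "again"
 * label instead of taking a lock on this fast path.
 */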
int qedi_get_task_idx(struct qedi_ctx *qedi)
{
        s16 tmp_idx;

again:
        tmp_idx = find_first_zero_bit(qedi->task_idx_map,
                                      MAX_ISCSI_TASK_ENTRIES);

        if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
                QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
                tmp_idx = -1;
                goto err_idx;
        }

        if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
                goto again;

err_idx:
        return tmp_idx;
}

void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
{
        if (!test_and_clear_bit(idx, qedi->task_idx_map))
                QEDI_ERR(&qedi->dbg_ctx,
                         "FW task context already cleared, tid=0x%x\n", idx);
}
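
/*
 * The itt_map array translates between the iSCSI protocol ITT and the
 * firmware task id (tid), and caches the owning qedi_cmd so completions
 * can be matched back to their command.
 */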
void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
                         struct qedi_cmd *cmd)
{
        qedi->itt_map[tid].itt = proto_itt;
        qedi->itt_map[tid].p_cmd = cmd;

        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
                  qedi->itt_map[tid].itt);
}

void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
{
        u16 i;

        for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
                if (qedi->itt_map[i].itt == itt) {
                        *tid = i;
                        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                                  "Ref itt=0x%x, found at tid=0x%x\n",
                                  itt, *tid);
                        return;
                }
        }

        WARN_ON(1);
}

void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
{
        *proto_itt = qedi->itt_map[tid].itt;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "Get itt map tid [0x%x] with proto itt [0x%x]\n",
                  tid, *proto_itt);
}

struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
{
        struct qedi_cmd *cmd = NULL;

        if (tid >= MAX_ISCSI_TASK_ENTRIES)
                return NULL;

        /* Guard against a stale or never-populated map entry before
         * dereferencing it.
         */
        cmd = qedi->itt_map[tid].p_cmd;
        if (!cmd || cmd->task_id != tid)
                return NULL;

        qedi->itt_map[tid].p_cmd = NULL;

        return cmd;
}

static int qedi_alloc_itt(struct qedi_ctx *qedi)
{
        qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
                                sizeof(struct qedi_itt_map), GFP_KERNEL);
        if (!qedi->itt_map) {
                QEDI_ERR(&qedi->dbg_ctx,
                         "Unable to allocate itt map array memory\n");
                return -ENOMEM;
        }
        return 0;
}

static void qedi_free_itt(struct qedi_ctx *qedi)
{
        kfree(qedi->itt_map);
}

static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
        .rx_cb = qedi_ll2_rx,
        .tx_cb = NULL,
};
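
/*
 * Per-CPU I/O kthread: completion work queued by the interrupt path is
 * spliced off p->work_list under p_work_lock, then processed with the lock
 * dropped so the IRQ side is never blocked behind CQE processing.
 * Unsolicited work items are owned, and freed, by this thread.
 */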
static int qedi_percpu_io_thread(void *arg)
{
        struct qedi_percpu_s *p = arg;
        struct qedi_work *work, *tmp;
        unsigned long flags;
        LIST_HEAD(work_list);

        set_user_nice(current, -20);

        while (!kthread_should_stop()) {
                spin_lock_irqsave(&p->p_work_lock, flags);
                while (!list_empty(&p->work_list)) {
                        list_splice_init(&p->work_list, &work_list);
                        spin_unlock_irqrestore(&p->p_work_lock, flags);

                        list_for_each_entry_safe(work, tmp, &work_list, list) {
                                list_del_init(&work->list);
                                qedi_fp_process_cqes(work);
                                if (!work->is_solicited)
                                        kfree(work);
                        }
                        cond_resched();
                        spin_lock_irqsave(&p->p_work_lock, flags);
                }
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&p->p_work_lock, flags);
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}
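
/*
 * CPU hotplug callbacks: a dedicated kthread is created and bound to each
 * CPU that comes online.  When a CPU goes offline, its pending work list
 * is drained inline in the offline callback before the thread is stopped.
 */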
static int qedi_cpu_online(unsigned int cpu)
{
        struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
        struct task_struct *thread;

        thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
                                        cpu_to_node(cpu),
                                        "qedi_thread/%d", cpu);
        if (IS_ERR(thread))
                return PTR_ERR(thread);

        kthread_bind(thread, cpu);
        p->iothread = thread;
        wake_up_process(thread);
        return 0;
}

static int qedi_cpu_offline(unsigned int cpu)
{
        struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
        struct qedi_work *work, *tmp;
        struct task_struct *thread;

        spin_lock_bh(&p->p_work_lock);
        thread = p->iothread;
        p->iothread = NULL;

        list_for_each_entry_safe(work, tmp, &p->work_list, list) {
                list_del_init(&work->list);
                qedi_fp_process_cqes(work);
                if (!work->is_solicited)
                        kfree(work);
        }

        spin_unlock_bh(&p->p_work_lock);
        if (thread)
                kthread_stop(thread);
        return 0;
}
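
/*
 * qedi_reset_host_mtu - apply a new path MTU.  Offloaded connections are
 * recovered first, then the light L2 (ll2) channel is stopped and
 * restarted with the new value plus IPv6 and TCP header allowance.
 */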
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
{
        struct qed_ll2_params params;

        qedi_recover_all_conns(qedi);

        qedi_ops->ll2->stop(qedi->cdev);
        qedi_ll2_free_skbs(qedi);

        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
                  qedi->ll2_mtu, mtu);
        memset(&params, 0, sizeof(params));
        qedi->ll2_mtu = mtu;
        params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
        params.drop_ttl0_packets = 0;
        params.rx_vlan_stripping = 1;
        ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
        qedi_ops->ll2->start(qedi->cdev, &params);
}

/**
 * qedi_get_nvram_block - Scan through the iSCSI NVRAM block (while
 * accounting for gaps) for the matching absolute pf-id of the QEDI device.
 */
static struct nvm_iscsi_block *
qedi_get_nvram_block(struct qedi_ctx *qedi)
{
        int i;
        u8 pf;
        u32 flags;
        struct nvm_iscsi_block *block;

        pf = qedi->dev_info.common.abs_pf_id;
        block = &qedi->iscsi_image->iscsi_cfg.block[0];

        for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
                flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
                        NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
                if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
                             NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
                    (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
                                >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
                        return block;
        }
        return NULL;
}
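
/*
 * iscsi_boot sysfs show helpers.  Each call formats one attribute into
 * @buf and returns the byte count; the values come from the NVRAM iSCSI
 * config block looked up via qedi_get_nvram_block() above.
 */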
static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
{
        struct qedi_ctx *qedi = data;
        struct nvm_iscsi_initiator *initiator;
        int rc = 1;
        u32 ipv6_en, dhcp_en, ip_len;
        struct nvm_iscsi_block *block;
        char *fmt, *ip, *sub, *gw;

        block = qedi_get_nvram_block(qedi);
        if (!block)
                return 0;

        initiator = &block->initiator;
        ipv6_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
        dhcp_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
        /* Static IP assignments. */
        fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
        ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
        ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
        sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
              initiator->ipv4.subnet_mask.byte;
        gw = ipv6_en ? initiator->ipv6.gateway.byte :
             initiator->ipv4.gateway.byte;
        /* DHCP IP adjustments. */
        fmt = dhcp_en ? "%s\n" : fmt;
        if (dhcp_en) {
                ip = ipv6_en ? "0::0" : "0.0.0.0";
                sub = ip;
                gw = ip;
                ip_len = ipv6_en ? 5 : 8;
        }

        switch (type) {
        case ISCSI_BOOT_ETH_IP_ADDR:
                rc = snprintf(buf, ip_len, fmt, ip);
                break;
        case ISCSI_BOOT_ETH_SUBNET_MASK:
                rc = snprintf(buf, ip_len, fmt, sub);
                break;
        case ISCSI_BOOT_ETH_GATEWAY:
                rc = snprintf(buf, ip_len, fmt, gw);
                break;
        case ISCSI_BOOT_ETH_FLAGS:
                rc = snprintf(buf, 3, "%hhd\n",
                              SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_ETH_INDEX:
                rc = snprintf(buf, 3, "0\n");
                break;
        case ISCSI_BOOT_ETH_MAC:
                rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
                break;
        case ISCSI_BOOT_ETH_VLAN:
                rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(initiator->generic_cont0,
                                         NVM_ISCSI_CFG_INITIATOR_VLAN));
                break;
        case ISCSI_BOOT_ETH_ORIGIN:
                if (dhcp_en)
                        rc = snprintf(buf, 3, "3\n");
                break;
        default:
                rc = 0;
                break;
        }

        return rc;
}

static umode_t qedi_eth_get_attr_visibility(void *data, int type)
{
        int rc = 1;

        switch (type) {
        case ISCSI_BOOT_ETH_FLAGS:
        case ISCSI_BOOT_ETH_MAC:
        case ISCSI_BOOT_ETH_INDEX:
        case ISCSI_BOOT_ETH_IP_ADDR:
        case ISCSI_BOOT_ETH_SUBNET_MASK:
        case ISCSI_BOOT_ETH_GATEWAY:
        case ISCSI_BOOT_ETH_ORIGIN:
        case ISCSI_BOOT_ETH_VLAN:
                rc = 0444;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
{
        struct qedi_ctx *qedi = data;
        struct nvm_iscsi_initiator *initiator;
        int rc;
        struct nvm_iscsi_block *block;

        block = qedi_get_nvram_block(qedi);
        if (!block)
                return 0;

        initiator = &block->initiator;

        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
                             initiator->initiator_name.byte);
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static umode_t qedi_ini_get_attr_visibility(void *data, int type)
{
        int rc;

        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
                rc = 0444;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static ssize_t
qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                        char *buf, enum qedi_nvm_tgts idx)
{
        int rc = 1;
        u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
        struct nvm_iscsi_block *block;
        char *chap_name, *chap_secret;
        char *mchap_name, *mchap_secret;

        block = qedi_get_nvram_block(qedi);
        if (!block)
                goto exit_show_tgt_info;

        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
                  "Port:%d, tgt_idx:%d\n",
                  GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);

        ctrl_flags = block->target[idx].ctrl_flags &
                     NVM_ISCSI_CFG_TARGET_ENABLED;

        if (!ctrl_flags) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
                          "Target disabled\n");
                goto exit_show_tgt_info;
        }

        ipv6_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
        ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
        chap_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
        chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
        chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;

        mchap_en = block->generic.ctrl_flags &
                   NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
        mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
        mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;

        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
                             block->target[idx].target_name.byte);
                break;
        case ISCSI_BOOT_TGT_IP_ADDR:
                if (ipv6_en)
                        rc = snprintf(buf, ip_len, "%pI6\n",
                                      block->target[idx].ipv6_addr.byte);
                else
                        rc = snprintf(buf, ip_len, "%pI4\n",
                                      block->target[idx].ipv4_addr.byte);
                break;
        case ISCSI_BOOT_TGT_PORT:
                rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(block->target[idx].generic_cont0,
                                         NVM_ISCSI_CFG_TARGET_TCP_PORT));
                break;
        case ISCSI_BOOT_TGT_LUN:
                rc = snprintf(buf, 22, "%.*d\n",
                              block->target[idx].lun.value[1],
                              block->target[idx].lun.value[0]);
                break;
        case ISCSI_BOOT_TGT_CHAP_NAME:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
                             chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
                             chap_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
                             mchap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
                             mchap_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
                rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_TGT_NIC_ASSOC:
                rc = snprintf(buf, 3, "0\n");
                break;
        default:
                rc = 0;
                break;
        }

exit_show_tgt_info:
        return rc;
}

static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
{
        struct qedi_ctx *qedi = data;

        return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
}

static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
{
        struct qedi_ctx *qedi = data;

        return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
}

static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
{
        int rc;

        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
        case ISCSI_BOOT_TGT_IP_ADDR:
        case ISCSI_BOOT_TGT_PORT:
        case ISCSI_BOOT_TGT_LUN:
        case ISCSI_BOOT_TGT_CHAP_NAME:
        case ISCSI_BOOT_TGT_CHAP_SECRET:
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
        case ISCSI_BOOT_TGT_NIC_ASSOC:
        case ISCSI_BOOT_TGT_FLAGS:
                rc = 0444;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static void qedi_boot_release(void *data)
{
        struct qedi_ctx *qedi = data;

        scsi_host_put(qedi->shost);
}

static int qedi_get_boot_info(struct qedi_ctx *qedi)
{
        int ret = 1;
        struct qedi_nvm_iscsi_image nvm_image;

        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Get NVM iSCSI CFG image\n");
        ret = qedi_ops->common->nvm_get_image(qedi->cdev,
                                              QED_NVM_IMAGE_ISCSI_CFG,
                                              (char *)qedi->iscsi_image,
                                              sizeof(nvm_image));
        if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "Could not get NVM image. ret = %d\n", ret);

        return ret;
}
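
/*
 * qedi_setup_boot_info - export the NVRAM boot configuration through the
 * iscsi_boot sysfs kset.  Each boot kobject (primary/secondary target,
 * initiator, ethernet) takes its own reference on the Scsi_Host, which is
 * dropped in qedi_boot_release() when the kobject goes away.
 */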
static int qedi_setup_boot_info(struct qedi_ctx *qedi)
{
        struct iscsi_boot_kobj *boot_kobj;

        if (qedi_get_boot_info(qedi))
                return -EPERM;

        qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
        if (!qedi->boot_kset)
                goto kset_free;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
                                             qedi_show_boot_tgt_pri_info,
                                             qedi_tgt_get_attr_visibility,
                                             qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
                                             qedi_show_boot_tgt_sec_info,
                                             qedi_tgt_get_attr_visibility,
                                             qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
                                                qedi_show_boot_ini_info,
                                                qedi_ini_get_attr_visibility,
                                                qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
                                               qedi_show_boot_eth_info,
                                               qedi_eth_get_attr_visibility,
                                               qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        return 0;

put_host:
        scsi_host_put(qedi->shost);
kset_free:
        iscsi_boot_destroy_kset(qedi->boot_kset);
        return -ENOMEM;
}
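
/*
 * __qedi_remove - common teardown for PCI remove and (via @mode) recovery.
 * Order matters: worker threads are flushed first, IRQs are freed, then
 * the iSCSI function, ll2 and slowpath are stopped before per-host
 * resources are released.  In QEDI_MODE_RECOVERY the host itself is kept.
 */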
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
        struct qedi_ctx *qedi = pci_get_drvdata(pdev);
        int rval;

        if (qedi->tmf_thread) {
                flush_workqueue(qedi->tmf_thread);
                destroy_workqueue(qedi->tmf_thread);
                qedi->tmf_thread = NULL;
        }

        if (qedi->offload_thread) {
                flush_workqueue(qedi->offload_thread);
                destroy_workqueue(qedi->offload_thread);
                qedi->offload_thread = NULL;
        }

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
                qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

        qedi_sync_free_irqs(qedi);

        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
                qedi_ops->stop(qedi->cdev);
                qedi_ops->ll2->stop(qedi->cdev);
        }

        if (mode == QEDI_MODE_NORMAL)
                qedi_free_iscsi_pf_param(qedi);

        rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
        if (rval)
                QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");

        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
                qedi_ops->common->slowpath_stop(qedi->cdev);
                qedi_ops->common->remove(qedi->cdev);
        }

        qedi_destroy_fp(qedi);

        if (mode == QEDI_MODE_NORMAL) {
                qedi_release_cid_que(qedi);
                qedi_cm_free_mem(qedi);
                qedi_free_uio(qedi->udev);
                qedi_free_itt(qedi);

                iscsi_host_remove(qedi->shost);
                iscsi_host_free(qedi->shost);

                if (qedi->ll2_recv_thread) {
                        kthread_stop(qedi->ll2_recv_thread);
                        qedi->ll2_recv_thread = NULL;
                }
                qedi_ll2_free_skbs(qedi);

                if (qedi->boot_kset)
                        iscsi_boot_destroy_kset(qedi->boot_kset);
        }
}
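
/*
 * __qedi_probe - bring-up path shared by normal PCI probe and recovery.
 * Rough order: qed core probe, iSCSI PF params, fastpath/slowpath start,
 * interrupts, BDQ doorbells, ll2 start, iSCSI function start, and finally
 * (normal mode only) host registration, UIO, CID/ITT tables, workqueues
 * and boot-info sysfs.
 */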
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
        struct qedi_ctx *qedi;
        struct qed_ll2_params params;
        u32 dp_module = 0;
        u8 dp_level = 0;
        bool is_vf = false;
        char host_buf[16];
        struct qed_link_params link_params;
        struct qed_slowpath_params sp_params;
        struct qed_probe_params qed_params;
        void *task_start, *task_end;
        int rc;
        u16 tmp;

        if (mode != QEDI_MODE_RECOVERY) {
                qedi = qedi_host_alloc(pdev);
                if (!qedi) {
                        rc = -ENOMEM;
                        goto exit_probe;
                }
        } else {
                qedi = pci_get_drvdata(pdev);
        }

        memset(&qed_params, 0, sizeof(qed_params));
        qed_params.protocol = QED_PROTOCOL_ISCSI;
        qed_params.dp_module = dp_module;
        qed_params.dp_level = dp_level;
        qed_params.is_vf = is_vf;
        qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
        if (!qedi->cdev) {
                rc = -ENODEV;
                QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
                goto free_host;
        }

        atomic_set(&qedi->link_state, QEDI_LINK_DOWN);

        rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
        if (rc)
                goto free_host;

        if (mode != QEDI_MODE_RECOVERY) {
                rc = qedi_set_iscsi_pf_param(qedi);
                if (rc) {
                        rc = -ENOMEM;
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Set iSCSI pf param fail\n");
                        goto free_host;
                }
        }

        qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

        rc = qedi_prepare_fp(qedi);
        if (rc) {
                QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
                goto free_pf_params;
        }

        /* Start the Slowpath-process */
        memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
        sp_params.int_mode = QED_INT_MODE_MSIX;
        sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
        sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
        sp_params.drv_rev = QEDI_DRIVER_REV_VER;
        sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
        strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
        rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
        if (rc) {
                QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
                goto stop_hw;
        }

        /* update_pf_params needs to be called before and after slowpath
         * start
         */
        qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

        rc = qedi_setup_int(qedi);
        if (rc)
                goto stop_iscsi_func;

        qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

        /* Learn information crucial for qedi to progress */
        rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
        if (rc)
                goto stop_iscsi_func;

        /* Record BDQ producer doorbell addresses */
        qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
        qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "BDQ primary_prod=%p secondary_prod=%p.\n",
                  qedi->bdq_primary_prod,
                  qedi->bdq_secondary_prod);

        /*
         * We need to write the number of BDs in the BDQ we've preallocated so
         * the f/w will do a prefetch and we'll get an unsolicited CQE when a
         * packet arrives.
         */
        qedi->bdq_prod_idx = QEDI_BDQ_NUM;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "Writing %d to primary and secondary BDQ doorbell registers.\n",
                  qedi->bdq_prod_idx);
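
        /*
         * Ring both BDQ producer doorbells; the readw() after each write
         * presumably serves to flush the posted write so the producer index
         * is visible to the adapter before we continue.
         */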
        writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
        tmp = readw(qedi->bdq_primary_prod);
        writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
        tmp = readw(qedi->bdq_secondary_prod);

        ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
                  qedi->mac);

        sprintf(host_buf, "host_%d", qedi->shost->host_no);
        qedi_ops->common->set_name(qedi->cdev, host_buf);

        qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);

        memset(&params, 0, sizeof(params));
        params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
        qedi->ll2_mtu = DEF_PATH_MTU;
        params.drop_ttl0_packets = 0;
        params.rx_vlan_stripping = 1;
        ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);

        if (mode != QEDI_MODE_RECOVERY) {
                /* set up rx path */
                INIT_LIST_HEAD(&qedi->ll2_skb_list);
                spin_lock_init(&qedi->ll2_lock);
                /* start qedi context */
                spin_lock_init(&qedi->hba_lock);
                spin_lock_init(&qedi->task_idx_lock);
                mutex_init(&qedi->stats_lock);
        }
        qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
        qedi_ops->ll2->start(qedi->cdev, &params);

        if (mode != QEDI_MODE_RECOVERY) {
                qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
                                                    (void *)qedi,
                                                    "qedi_ll2_thread");
        }

        rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
                             qedi, qedi_iscsi_event_cb);
        if (rc) {
                rc = -ENODEV;
                QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
                goto stop_slowpath;
        }

        task_start = qedi_get_task_mem(&qedi->tasks, 0);
        task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "Task context start=%p, end=%p block_size=%u.\n",
                  task_start, task_end, qedi->tasks.size);

        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = true;
        rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
        if (rc) {
                QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
                atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
        }

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_host_init(&qedi->dbg_ctx, qedi_debugfs_ops,
                           qedi_dbg_fops);
#endif
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
                  QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
                  FW_REVISION_VERSION, FW_ENGINEERING_VERSION);

        if (mode == QEDI_MODE_NORMAL) {
                if (iscsi_host_add(qedi->shost, &pdev->dev)) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not add iscsi host\n");
                        rc = -ENOMEM;
                        goto remove_host;
                }

                /* Allocate uio buffers */
                rc = qedi_alloc_uio_rings(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "UIO alloc ring failed err=%d\n", rc);
                        goto remove_host;
                }

                rc = qedi_init_uio(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "UIO init failed, err=%d\n", rc);
                        goto free_uio;
                }

                /* host the array on iscsi_conn */
                rc = qedi_setup_cid_que(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not setup cid que\n");
                        goto free_uio;
                }

                rc = qedi_cm_alloc_mem(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not alloc cm memory\n");
                        goto free_cid_que;
                }

                rc = qedi_alloc_itt(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not alloc itt memory\n");
                        goto free_cid_que;
                }

                sprintf(host_buf, "host_%d", qedi->shost->host_no);
                qedi->tmf_thread = create_singlethread_workqueue(host_buf);
                if (!qedi->tmf_thread) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Unable to start tmf thread!\n");
                        rc = -ENODEV;
                        goto free_cid_que;
                }

                sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
                qedi->offload_thread = create_workqueue(host_buf);
                if (!qedi->offload_thread) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Unable to start offload thread!\n");
                        rc = -ENODEV;
                        goto free_tmf_thread;
                }

                /* F/w needs 1st task context memory entry for performance */
                set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
                atomic_set(&qedi->num_offloads, 0);

                if (qedi_setup_boot_info(qedi))
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "No iSCSI boot target configured\n");

                rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
                if (rc)
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Failed to send drv state to MFW\n");
        }

        return 0;

free_tmf_thread:
        destroy_workqueue(qedi->tmf_thread);
free_cid_que:
        qedi_release_cid_que(qedi);
free_uio:
        qedi_free_uio(qedi->udev);
remove_host:
#ifdef CONFIG_DEBUG_FS
        qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
        iscsi_host_remove(qedi->shost);
stop_iscsi_func:
        qedi_ops->stop(qedi->cdev);
stop_slowpath:
        qedi_ops->common->slowpath_stop(qedi->cdev);
stop_hw:
        qedi_ops->common->remove(qedi->cdev);
free_pf_params:
        qedi_free_iscsi_pf_param(qedi);
free_host:
        iscsi_host_free(qedi->shost);
exit_probe:
        return rc;
}

static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return __qedi_probe(pdev, QEDI_MODE_NORMAL);
}

static void qedi_remove(struct pci_dev *pdev)
{
        __qedi_remove(pdev, QEDI_MODE_NORMAL);
}

static struct pci_device_id qedi_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
        { 0 },
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);

static enum cpuhp_state qedi_cpuhp_state;

static struct pci_driver qedi_pci_driver = {
        .name = QEDI_MODULE_NAME,
        .id_table = qedi_pci_tbl,
        .probe = qedi_probe,
        .remove = qedi_remove,
};
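
/*
 * Module init registers, in order: the qed iSCSI ops, debugfs, the iSCSI
 * transport class, the per-CPU worker state plus CPU hotplug callbacks,
 * and finally the PCI driver.  The error path unwinds in reverse.
 */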
static int __init qedi_init(void)
{
        struct qedi_percpu_s *p;
        int cpu, rc = 0;

        qedi_ops = qed_get_iscsi_ops();
        if (!qedi_ops) {
                QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
                return -EINVAL;
        }

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_init("qedi");
#endif

        qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
        if (!qedi_scsi_transport) {
                QEDI_ERR(NULL, "Could not register qedi transport\n");
                rc = -ENOMEM;
                goto exit_qedi_init_1;
        }

        for_each_possible_cpu(cpu) {
                p = &per_cpu(qedi_percpu, cpu);
                INIT_LIST_HEAD(&p->work_list);
                spin_lock_init(&p->p_work_lock);
                p->iothread = NULL;
        }

        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
                               qedi_cpu_online, qedi_cpu_offline);
        if (rc < 0)
                goto exit_qedi_init_2;
        qedi_cpuhp_state = rc;

        rc = pci_register_driver(&qedi_pci_driver);
        if (rc) {
                QEDI_ERR(NULL, "Failed to register driver\n");
                goto exit_qedi_hp;
        }

        return 0;

exit_qedi_hp:
        cpuhp_remove_state(qedi_cpuhp_state);
exit_qedi_init_2:
        iscsi_unregister_transport(&qedi_iscsi_transport);
exit_qedi_init_1:
#ifdef CONFIG_DEBUG_FS
        qedi_dbg_exit();
#endif
        qed_put_iscsi_ops();
        return rc;
}

static void __exit qedi_cleanup(void)
{
        pci_unregister_driver(&qedi_pci_driver);
        cpuhp_remove_state(qedi_cpuhp_state);
        iscsi_unregister_transport(&qedi_iscsi_transport);

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_exit();
#endif
        qed_put_iscsi_ops();
}

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDI_MODULE_VERSION);
module_init(qedi_init);
module_exit(qedi_cleanup);