fnic_scsi.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright 2008 Cisco Systems, Inc. All rights reserved.
  4. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  5. */
  6. #include <linux/mempool.h>
  7. #include <linux/errno.h>
  8. #include <linux/init.h>
  9. #include <linux/workqueue.h>
  10. #include <linux/pci.h>
  11. #include <linux/scatterlist.h>
  12. #include <linux/skbuff.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/if_ether.h>
  16. #include <linux/if_vlan.h>
  17. #include <linux/delay.h>
  18. #include <linux/gfp.h>
  19. #include <scsi/scsi.h>
  20. #include <scsi/scsi_host.h>
  21. #include <scsi/scsi_device.h>
  22. #include <scsi/scsi_cmnd.h>
  23. #include <scsi/scsi_tcq.h>
  24. #include <scsi/fc/fc_els.h>
  25. #include <scsi/fc/fc_fcoe.h>
  26. #include <scsi/libfc.h>
  27. #include <scsi/fc_frame.h>
  28. #include "fnic_io.h"
  29. #include "fnic.h"
  30. const char *fnic_state_str[] = {
  31. [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
  32. [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
  33. [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
  34. [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
  35. };
  36. static const char *fnic_ioreq_state_str[] = {
  37. [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
  38. [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
  39. [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
  40. [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
  41. [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
  42. };
  43. static const char *fcpio_status_str[] = {
  44. [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
  45. [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
  46. [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
  47. [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
  48. [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
  49. [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
  50. [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
  51. [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
  52. [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
  53. [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
  54. [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
  55. [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
  56. [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
  57. [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
  58. [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
  59. [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
  60. [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
  61. [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
  62. [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
  63. };
  64. const char *fnic_state_to_str(unsigned int state)
  65. {
  66. if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
  67. return "unknown";
  68. return fnic_state_str[state];
  69. }
  70. static const char *fnic_ioreq_state_to_str(unsigned int state)
  71. {
  72. if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
  73. !fnic_ioreq_state_str[state])
  74. return "unknown";
  75. return fnic_ioreq_state_str[state];
  76. }
  77. static const char *fnic_fcpio_status_to_str(unsigned int status)
  78. {
  79. if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
  80. return "unknown";
  81. return fcpio_status_str[status];
  82. }
  83. static void fnic_cleanup_io(struct fnic *fnic);
  84. /*
  85. * Unmap the data buffer and sense buffer for an io_req,
  86. * also unmap and free the device-private scatter/gather list.
  87. */
  88. static void fnic_release_ioreq_buf(struct fnic *fnic,
  89. struct fnic_io_req *io_req,
  90. struct scsi_cmnd *sc)
  91. {
  92. if (io_req->sgl_list_pa)
  93. dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
  94. sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
  95. DMA_TO_DEVICE);
  96. scsi_dma_unmap(sc);
  97. if (io_req->sgl_cnt)
  98. mempool_free(io_req->sgl_list_alloc,
  99. fnic->io_sgl_pool[io_req->sgl_type]);
  100. if (io_req->sense_buf_pa)
  101. dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
  102. SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
  103. }
  104. /* Free up Copy Wq descriptors. Called with copy_wq lock held */
  105. static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)
  106. {
  107. /* if no Ack received from firmware, then nothing to clean */
  108. if (!fnic->fw_ack_recd[hwq])
  109. return 1;
  110. /*
  111. * Update desc_available count based on number of freed descriptors
  112. * Account for wraparound
  113. */
  114. if (wq->to_clean_index <= fnic->fw_ack_index[hwq])
  115. wq->ring.desc_avail += (fnic->fw_ack_index[hwq]
  116. - wq->to_clean_index + 1);
  117. else
  118. wq->ring.desc_avail += (wq->ring.desc_count
  119. - wq->to_clean_index
  120. + fnic->fw_ack_index[hwq] + 1);
  121. /*
  122. * just bump clean index to ack_index+1 accounting for wraparound
  123. * this will essentially free up all descriptors between
  124. * to_clean_index and fw_ack_index, both inclusive
  125. */
  126. wq->to_clean_index =
  127. (fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count;
  128. /* we have processed the acks received so far */
  129. fnic->fw_ack_recd[hwq] = 0;
  130. return 0;
  131. }
  132. /*
  133. * __fnic_set_state_flags
  134. * Sets/Clears bits in fnic's state_flags
  135. **/
  136. void
  137. __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
  138. unsigned long clearbits)
  139. {
  140. unsigned long flags = 0;
  141. spin_lock_irqsave(&fnic->fnic_lock, flags);
  142. if (clearbits)
  143. fnic->state_flags &= ~st_flags;
  144. else
  145. fnic->state_flags |= st_flags;
  146. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  147. return;
  148. }
  149. /*
  150. * fnic_fw_reset_handler
  151. * Routine to send reset msg to fw
  152. */
  153. int fnic_fw_reset_handler(struct fnic *fnic)
  154. {
  155. struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
  156. int ret = 0;
  157. unsigned long flags;
  158. /* indicate fwreset to io path */
  159. fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
  160. skb_queue_purge(&fnic->frame_queue);
  161. skb_queue_purge(&fnic->tx_queue);
  162. /* wait for io cmpl */
  163. while (atomic_read(&fnic->in_flight))
  164. schedule_timeout(msecs_to_jiffies(1));
  165. spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
  166. if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
  167. free_wq_copy_descs(fnic, wq, 0);
  168. if (!vnic_wq_copy_desc_avail(wq))
  169. ret = -EAGAIN;
  170. else {
  171. fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
  172. atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
  173. if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
  174. atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
  175. atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
  176. atomic64_read(
  177. &fnic->fnic_stats.fw_stats.active_fw_reqs));
  178. }
  179. spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
  180. if (!ret) {
  181. atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
  182. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  183. "Issued fw reset\n");
  184. } else {
  185. fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
  186. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  187. "Failed to issue fw reset\n");
  188. }
  189. return ret;
  190. }
  191. /*
  192. * fnic_flogi_reg_handler
  193. * Routine to send flogi register msg to fw
  194. */
  195. int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
  196. {
  197. struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
  198. enum fcpio_flogi_reg_format_type format;
  199. struct fc_lport *lp = fnic->lport;
  200. u8 gw_mac[ETH_ALEN];
  201. int ret = 0;
  202. unsigned long flags;
  203. spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
  204. if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
  205. free_wq_copy_descs(fnic, wq, 0);
  206. if (!vnic_wq_copy_desc_avail(wq)) {
  207. ret = -EAGAIN;
  208. goto flogi_reg_ioreq_end;
  209. }
  210. if (fnic->ctlr.map_dest) {
  211. eth_broadcast_addr(gw_mac);
  212. format = FCPIO_FLOGI_REG_DEF_DEST;
  213. } else {
  214. memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
  215. format = FCPIO_FLOGI_REG_GW_DEST;
  216. }
  217. if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
  218. fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
  219. fc_id, gw_mac,
  220. fnic->data_src_addr,
  221. lp->r_a_tov, lp->e_d_tov);
  222. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  223. "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
  224. fc_id, fnic->data_src_addr, gw_mac);
  225. } else {
  226. fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
  227. format, fc_id, gw_mac);
  228. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  229. "FLOGI reg issued fcid 0x%x map %d dest 0x%p\n",
  230. fc_id, fnic->ctlr.map_dest, gw_mac);
  231. }
  232. atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
  233. if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
  234. atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
  235. atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
  236. atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
  237. flogi_reg_ioreq_end:
  238. spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
  239. return ret;
  240. }
  241. /*
  242. * fnic_queue_wq_copy_desc
  243. * Routine to enqueue a wq copy desc
  244. */
  245. static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
  246. struct vnic_wq_copy *wq,
  247. struct fnic_io_req *io_req,
  248. struct scsi_cmnd *sc,
  249. int sg_count,
  250. uint32_t mqtag,
  251. uint16_t hwq)
  252. {
  253. struct scatterlist *sg;
  254. struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
  255. struct fc_rport_libfc_priv *rp = rport->dd_data;
  256. struct host_sg_desc *desc;
  257. struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
  258. unsigned int i;
  259. int flags;
  260. u8 exch_flags;
  261. struct scsi_lun fc_lun;
  262. if (sg_count) {
  263. /* For each SGE, create a device desc entry */
  264. desc = io_req->sgl_list;
  265. for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
  266. desc->addr = cpu_to_le64(sg_dma_address(sg));
  267. desc->len = cpu_to_le32(sg_dma_len(sg));
  268. desc->_resvd = 0;
  269. desc++;
  270. }
  271. io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
  272. io_req->sgl_list,
  273. sizeof(io_req->sgl_list[0]) * sg_count,
  274. DMA_TO_DEVICE);
  275. if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
  276. printk(KERN_ERR "DMA mapping failed\n");
  277. return SCSI_MLQUEUE_HOST_BUSY;
  278. }
  279. }
  280. io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
  281. sc->sense_buffer,
  282. SCSI_SENSE_BUFFERSIZE,
  283. DMA_FROM_DEVICE);
  284. if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
  285. dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
  286. sizeof(io_req->sgl_list[0]) * sg_count,
  287. DMA_TO_DEVICE);
  288. printk(KERN_ERR "DMA mapping failed\n");
  289. return SCSI_MLQUEUE_HOST_BUSY;
  290. }
  291. int_to_scsilun(sc->device->lun, &fc_lun);
  292. /* Enqueue the descriptor in the Copy WQ */
  293. if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
  294. free_wq_copy_descs(fnic, wq, hwq);
  295. if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
  296. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  297. "fnic_queue_wq_copy_desc failure - no descriptors\n");
  298. atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
  299. return SCSI_MLQUEUE_HOST_BUSY;
  300. }
  301. flags = 0;
  302. if (sc->sc_data_direction == DMA_FROM_DEVICE)
  303. flags = FCPIO_ICMND_RDDATA;
  304. else if (sc->sc_data_direction == DMA_TO_DEVICE)
  305. flags = FCPIO_ICMND_WRDATA;
  306. exch_flags = 0;
  307. if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
  308. (rp->flags & FC_RP_FLAGS_RETRY))
  309. exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
  310. fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag,
  311. 0, exch_flags, io_req->sgl_cnt,
  312. SCSI_SENSE_BUFFERSIZE,
  313. io_req->sgl_list_pa,
  314. io_req->sense_buf_pa,
  315. 0, /* scsi cmd ref, always 0 */
  316. FCPIO_ICMND_PTA_SIMPLE,
  317. /* scsi pri and tag */
  318. flags, /* command flags */
  319. sc->cmnd, sc->cmd_len,
  320. scsi_bufflen(sc),
  321. fc_lun.scsi_lun, io_req->port_id,
  322. rport->maxframe_size, rp->r_a_tov,
  323. rp->e_d_tov);
  324. atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
  325. if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
  326. atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
  327. atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
  328. atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
  329. return 0;
  330. }
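/*
 * fnic_queuecommand
 * SCSI host template queuecommand entry point: allocates an io_req for the
 * command, maps its scatter/gather list, and posts an ICMND descriptor to
 * the copy WQ selected by the block-layer hardware queue.
 */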
  331. int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
  332. {
  333. struct request *const rq = scsi_cmd_to_rq(sc);
  334. uint32_t mqtag = 0;
  335. void (*done)(struct scsi_cmnd *) = scsi_done;
  336. struct fc_lport *lp = shost_priv(sc->device->host);
  337. struct fc_rport *rport;
  338. struct fnic_io_req *io_req = NULL;
  339. struct fnic *fnic = lport_priv(lp);
  340. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  341. struct vnic_wq_copy *wq;
  342. int ret = 1;
  343. u64 cmd_trace;
  344. int sg_count = 0;
  345. unsigned long flags = 0;
  346. unsigned long ptr;
  347. int io_lock_acquired = 0;
  348. struct fc_rport_libfc_priv *rp;
  349. uint16_t hwq = 0;
  350. mqtag = blk_mq_unique_tag(rq);
  351. spin_lock_irqsave(&fnic->fnic_lock, flags);
  352. if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
  353. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  354. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  355. "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
  356. fnic->state_flags);
  357. return SCSI_MLQUEUE_HOST_BUSY;
  358. }
  359. if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
  360. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  361. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  362. "fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
  363. fnic->state_flags);
  364. return SCSI_MLQUEUE_HOST_BUSY;
  365. }
  366. rport = starget_to_rport(scsi_target(sc->device));
  367. if (!rport) {
  368. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  369. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  370. "returning DID_NO_CONNECT for IO as rport is NULL\n");
  371. sc->result = DID_NO_CONNECT << 16;
  372. done(sc);
  373. return 0;
  374. }
  375. ret = fc_remote_port_chkready(rport);
  376. if (ret) {
  377. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  378. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  379. "rport is not ready\n");
  380. atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
  381. sc->result = ret;
  382. done(sc);
  383. return 0;
  384. }
  385. rp = rport->dd_data;
  386. if (!rp || rp->rp_state == RPORT_ST_DELETE) {
  387. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  388. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  389. "rport 0x%x removed, returning DID_NO_CONNECT\n",
  390. rport->port_id);
  391. atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
  392. sc->result = DID_NO_CONNECT<<16;
  393. done(sc);
  394. return 0;
  395. }
  396. if (rp->rp_state != RPORT_ST_READY) {
  397. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  398. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  399. "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
  400. rport->port_id, rp->rp_state);
  401. sc->result = DID_IMM_RETRY << 16;
  402. done(sc);
  403. return 0;
  404. }
  405. if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
  406. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  407. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  408. "state not ready: %d/link not up: %d Returning HOST_BUSY\n",
  409. lp->state, lp->link_up);
  410. return SCSI_MLQUEUE_HOST_BUSY;
  411. }
  412. atomic_inc(&fnic->in_flight);
  413. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  414. fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
  415. fnic_priv(sc)->flags = FNIC_NO_FLAGS;
  416. /* Get a new io_req for this SCSI IO */
  417. io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
  418. if (!io_req) {
  419. atomic64_inc(&fnic_stats->io_stats.alloc_failures);
  420. ret = SCSI_MLQUEUE_HOST_BUSY;
  421. goto out;
  422. }
  423. memset(io_req, 0, sizeof(*io_req));
  424. /* Map the data buffer */
  425. sg_count = scsi_dma_map(sc);
  426. if (sg_count < 0) {
  427. FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
  428. mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
  429. mempool_free(io_req, fnic->io_req_pool);
  430. goto out;
  431. }
  432. /* Determine the type of scatter/gather list we need */
  433. io_req->sgl_cnt = sg_count;
  434. io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
  435. if (sg_count > FNIC_DFLT_SG_DESC_CNT)
  436. io_req->sgl_type = FNIC_SGL_CACHE_MAX;
  437. if (sg_count) {
  438. io_req->sgl_list =
  439. mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
  440. GFP_ATOMIC);
  441. if (!io_req->sgl_list) {
  442. atomic64_inc(&fnic_stats->io_stats.alloc_failures);
  443. ret = SCSI_MLQUEUE_HOST_BUSY;
  444. scsi_dma_unmap(sc);
  445. mempool_free(io_req, fnic->io_req_pool);
  446. goto out;
  447. }
  448. /* Cache sgl list allocated address before alignment */
  449. io_req->sgl_list_alloc = io_req->sgl_list;
  450. ptr = (unsigned long) io_req->sgl_list;
  451. if (ptr % FNIC_SG_DESC_ALIGN) {
  452. io_req->sgl_list = (struct host_sg_desc *)
  453. (((unsigned long) ptr
  454. + FNIC_SG_DESC_ALIGN - 1)
  455. & ~(FNIC_SG_DESC_ALIGN - 1));
  456. }
  457. }
  458. /*
  459. * Will acquire lock before setting to IO initialized.
  460. */
  461. hwq = blk_mq_unique_tag_to_hwq(mqtag);
  462. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  463. /* initialize rest of io_req */
  464. io_lock_acquired = 1;
  465. io_req->port_id = rport->port_id;
  466. io_req->start_time = jiffies;
  467. fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
  468. fnic_priv(sc)->io_req = io_req;
  469. fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
  470. io_req->sc = sc;
  471. if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) {
  472. WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n",
  473. fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag));
  474. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  475. return SCSI_MLQUEUE_HOST_BUSY;
  476. }
  477. fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req;
  478. io_req->tag = mqtag;
  479. /* create copy wq desc and enqueue it */
  480. wq = &fnic->hw_copy_wq[hwq];
  481. atomic64_inc(&fnic_stats->io_stats.ios[hwq]);
  482. ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq);
  483. if (ret) {
  484. /*
  485. * In case another thread cancelled the request,
  486. * refetch the pointer under the lock.
  487. */
  488. FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
  489. mqtag, sc, 0, 0, 0, fnic_flags_and_state(sc));
  490. io_req = fnic_priv(sc)->io_req;
  491. fnic_priv(sc)->io_req = NULL;
  492. if (io_req)
  493. fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
  494. fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
  495. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  496. if (io_req) {
  497. fnic_release_ioreq_buf(fnic, io_req, sc);
  498. mempool_free(io_req, fnic->io_req_pool);
  499. }
  500. atomic_dec(&fnic->in_flight);
  501. return ret;
  502. } else {
  503. atomic64_inc(&fnic_stats->io_stats.active_ios);
  504. atomic64_inc(&fnic_stats->io_stats.num_ios);
  505. if (atomic64_read(&fnic_stats->io_stats.active_ios) >
  506. atomic64_read(&fnic_stats->io_stats.max_active_ios))
  507. atomic64_set(&fnic_stats->io_stats.max_active_ios,
  508. atomic64_read(&fnic_stats->io_stats.active_ios));
  509. /* REVISIT: Use per IO lock in the final code */
  510. fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
  511. }
  512. out:
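/* Pack selected CDB bytes into a single u64 for the fnic trace buffer */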
  513. cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
  514. (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
  515. (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
  516. sc->cmnd[5]);
  517. FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
  518. mqtag, sc, io_req, sg_count, cmd_trace,
  519. fnic_flags_and_state(sc));
  520. /* only if we issued the IO do we hold the io lock */
  521. if (io_lock_acquired)
  522. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  523. atomic_dec(&fnic->in_flight);
  524. return ret;
  525. }
  526. /*
  527. * fnic_fcpio_fw_reset_cmpl_handler
  528. * Routine to handle fw reset completion
  529. */
  530. static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
  531. struct fcpio_fw_req *desc)
  532. {
  533. u8 type;
  534. u8 hdr_status;
  535. struct fcpio_tag tag;
  536. int ret = 0;
  537. unsigned long flags;
  538. struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
  539. fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
  540. atomic64_inc(&reset_stats->fw_reset_completions);
  541. /* Clean up all outstanding io requests */
  542. fnic_cleanup_io(fnic);
  543. atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
  544. atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
  545. atomic64_set(&fnic->io_cmpl_skip, 0);
  546. spin_lock_irqsave(&fnic->fnic_lock, flags);
  547. /* fnic should be in FC_TRANS_ETH_MODE */
  548. if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
  549. /* Check status of reset completion */
  550. if (!hdr_status) {
  551. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  552. "reset cmpl success\n");
  553. /* Ready to send flogi out */
  554. fnic->state = FNIC_IN_ETH_MODE;
  555. } else {
  556. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  557. "reset failed with header status: %s\n",
  558. fnic_fcpio_status_to_str(hdr_status));
  559. /*
  560. * Unable to change to eth mode, cannot send out flogi
  561. * Change state to fc mode, so that subsequent Flogi
  562. * requests from libFC will cause more attempts to
  563. * reset the firmware. Free the cached flogi
  564. */
  565. fnic->state = FNIC_IN_FC_MODE;
  566. atomic64_inc(&reset_stats->fw_reset_failures);
  567. ret = -1;
  568. }
  569. } else {
  570. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  571. "Unexpected state while processing reset completion: %s\n",
  572. fnic_state_to_str(fnic->state));
  573. atomic64_inc(&reset_stats->fw_reset_failures);
  574. ret = -1;
  575. }
  576. /* Thread removing device blocks till firmware reset is complete */
  577. if (fnic->remove_wait)
  578. complete(fnic->remove_wait);
  579. /*
  580. * If fnic is being removed, or fw reset failed
  581. * free the flogi frame. Else, send it out
  582. */
  583. if (fnic->remove_wait || ret) {
  584. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  585. skb_queue_purge(&fnic->tx_queue);
  586. goto reset_cmpl_handler_end;
  587. }
  588. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  589. queue_work(fnic_event_queue, &fnic->flush_work);
  590. reset_cmpl_handler_end:
  591. fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
  592. return ret;
  593. }
  594. /*
  595. * fnic_fcpio_flogi_reg_cmpl_handler
  596. * Routine to handle flogi register completion
  597. */
  598. static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
  599. struct fcpio_fw_req *desc)
  600. {
  601. u8 type;
  602. u8 hdr_status;
  603. struct fcpio_tag tag;
  604. int ret = 0;
  605. unsigned long flags;
  606. fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
  607. /* Update fnic state based on status of flogi reg completion */
  608. spin_lock_irqsave(&fnic->fnic_lock, flags);
  609. if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
  610. /* Check flogi registration completion status */
  611. if (!hdr_status) {
  612. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  613. "flogi reg succeeded\n");
  614. fnic->state = FNIC_IN_FC_MODE;
  615. } else {
  616. FNIC_SCSI_DBG(KERN_DEBUG,
  617. fnic->lport->host, fnic->fnic_num,
  618. "fnic flogi reg failed: %s\n",
  619. fnic_fcpio_status_to_str(hdr_status));
  620. fnic->state = FNIC_IN_ETH_MODE;
  621. ret = -1;
  622. }
  623. } else {
  624. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  625. "Unexpected fnic state %s while"
  626. " processing flogi reg completion\n",
  627. fnic_state_to_str(fnic->state));
  628. ret = -1;
  629. }
  630. if (!ret) {
  631. if (fnic->stop_rx_link_events) {
  632. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  633. goto reg_cmpl_handler_end;
  634. }
  635. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  636. queue_work(fnic_event_queue, &fnic->flush_work);
  637. queue_work(fnic_event_queue, &fnic->frame_work);
  638. } else {
  639. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  640. }
  641. reg_cmpl_handler_end:
  642. return ret;
  643. }
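/*
 * Check whether the request_out index acked by firmware lies between
 * to_clean_index and to_use_index, accounting for ring wraparound.
 */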
  644. static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
  645. u16 request_out)
  646. {
  647. if (wq->to_clean_index <= wq->to_use_index) {
  648. /* out of range, stale request_out index */
  649. if (request_out < wq->to_clean_index ||
  650. request_out >= wq->to_use_index)
  651. return 0;
  652. } else {
  653. /* out of range, stale request_out index */
  654. if (request_out < wq->to_clean_index &&
  655. request_out >= wq->to_use_index)
  656. return 0;
  657. }
  658. /* request_out index is in range */
  659. return 1;
  660. }
  661. /*
  662. * Mark that ack received and store the Ack index. If there are multiple
  663. * acks received before Tx thread cleans it up, the latest value will be
  664. * used which is correct behavior. This state should be in the copy Wq
  665. * instead of in the fnic
  666. */
  667. static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
  668. unsigned int cq_index,
  669. struct fcpio_fw_req *desc)
  670. {
  671. struct vnic_wq_copy *wq;
  672. u16 request_out = desc->u.ack.request_out;
  673. unsigned long flags;
  674. u64 *ox_id_tag = (u64 *)(void *)desc;
  675. unsigned int wq_index = cq_index;
  676. /* mark the ack state */
  677. wq = &fnic->hw_copy_wq[cq_index];
  678. spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags);
  679. fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
  680. if (is_ack_index_in_range(wq, request_out)) {
  681. fnic->fw_ack_index[wq_index] = request_out;
  682. fnic->fw_ack_recd[wq_index] = 1;
  683. } else
  684. atomic64_inc(
  685. &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
  686. spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags);
  687. FNIC_TRACE(fnic_fcpio_ack_handler,
  688. fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
  689. ox_id_tag[4], ox_id_tag[5]);
  690. }
  691. /*
  692. * fnic_fcpio_icmnd_cmpl_handler
  693. * Routine to handle icmnd completions
  694. */
  695. static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
  696. struct fcpio_fw_req *desc)
  697. {
  698. u8 type;
  699. u8 hdr_status;
  700. struct fcpio_tag ftag;
  701. u32 id;
  702. u64 xfer_len = 0;
  703. struct fcpio_icmnd_cmpl *icmnd_cmpl;
  704. struct fnic_io_req *io_req;
  705. struct scsi_cmnd *sc;
  706. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  707. unsigned long flags;
  708. u64 cmd_trace;
  709. unsigned long start_time;
  710. unsigned long io_duration_time;
  711. unsigned int hwq = 0;
  712. unsigned int mqtag = 0;
  713. unsigned int tag = 0;
  714. /* Decode the cmpl description to get the io_req id */
  715. fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
  716. fcpio_tag_id_dec(&ftag, &id);
  717. icmnd_cmpl = &desc->u.icmnd_cmpl;
  718. mqtag = id;
  719. tag = blk_mq_unique_tag_to_tag(mqtag);
  720. hwq = blk_mq_unique_tag_to_hwq(mqtag);
  721. if (hwq != cq_index) {
  722. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  723. "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
  724. hwq, mqtag, tag, cq_index);
  725. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  726. "hdr status: %s icmnd completion on the wrong queue\n",
  727. fnic_fcpio_status_to_str(hdr_status));
  728. }
  729. if (tag >= fnic->fnic_max_tag_id) {
  730. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  731. "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
  732. hwq, mqtag, tag, cq_index);
  733. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  734. "hdr status: %s Out of range tag\n",
  735. fnic_fcpio_status_to_str(hdr_status));
  736. return;
  737. }
  738. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  739. sc = scsi_host_find_tag(fnic->lport->host, id);
  740. WARN_ON_ONCE(!sc);
  741. if (!sc) {
  742. atomic64_inc(&fnic_stats->io_stats.sc_null);
  743. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  744. shost_printk(KERN_ERR, fnic->lport->host,
  745. "icmnd_cmpl sc is null - "
  746. "hdr status = %s tag = 0x%x desc = 0x%p\n",
  747. fnic_fcpio_status_to_str(hdr_status), id, desc);
  748. FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
  749. fnic->lport->host->host_no, id,
  750. ((u64)icmnd_cmpl->_resvd0[1] << 16 |
  751. (u64)icmnd_cmpl->_resvd0[0]),
  752. ((u64)hdr_status << 16 |
  753. (u64)icmnd_cmpl->scsi_status << 8 |
  754. (u64)icmnd_cmpl->flags), desc,
  755. (u64)icmnd_cmpl->residual, 0);
  756. return;
  757. }
  758. io_req = fnic_priv(sc)->io_req;
  759. if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) {
  760. WARN(1, "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x io_req tag mismatch\n",
  761. __func__, __LINE__, hwq, mqtag, tag);
  762. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  763. return;
  764. }
  765. WARN_ON_ONCE(!io_req);
  766. if (!io_req) {
  767. atomic64_inc(&fnic_stats->io_stats.ioreq_null);
  768. fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
  769. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  770. shost_printk(KERN_ERR, fnic->lport->host,
  771. "icmnd_cmpl io_req is null - "
  772. "hdr status = %s tag = 0x%x sc 0x%p\n",
  773. fnic_fcpio_status_to_str(hdr_status), id, sc);
  774. return;
  775. }
  776. start_time = io_req->start_time;
  777. /* firmware completed the io */
  778. io_req->io_completed = 1;
  779. /*
  780. * if SCSI-ML has already issued abort on this command,
  781. * set completion of the IO. The abts path will clean it up
  782. */
  783. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
  784. /*
  785. * set the FNIC_IO_DONE so that this doesn't get
  786. * flagged as 'out of order' if it was not aborted
  787. */
  788. fnic_priv(sc)->flags |= FNIC_IO_DONE;
  789. fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
  790. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  791. if (hdr_status == FCPIO_ABORTED)
  792. fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
  793. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  794. "icmnd_cmpl abts pending "
  795. "hdr status = %s tag = 0x%x sc = 0x%p "
  796. "scsi_status = %x residual = %d\n",
  797. fnic_fcpio_status_to_str(hdr_status),
  798. id, sc,
  799. icmnd_cmpl->scsi_status,
  800. icmnd_cmpl->residual);
  801. return;
  802. }
  803. /* Mark the IO as complete */
  804. fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
  805. icmnd_cmpl = &desc->u.icmnd_cmpl;
  806. switch (hdr_status) {
  807. case FCPIO_SUCCESS:
  808. sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
  809. xfer_len = scsi_bufflen(sc);
  810. if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
  811. xfer_len -= icmnd_cmpl->residual;
  812. scsi_set_resid(sc, icmnd_cmpl->residual);
  813. }
  814. if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
  815. atomic64_inc(&fnic_stats->misc_stats.check_condition);
  816. if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
  817. atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
  818. break;
  819. case FCPIO_TIMEOUT: /* request was timed out */
  820. atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
  821. sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
  822. break;
  823. case FCPIO_ABORTED: /* request was aborted */
  824. atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
  825. sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
  826. break;
  827. case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
  828. atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
  829. scsi_set_resid(sc, icmnd_cmpl->residual);
  830. sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
  831. break;
  832. case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
  833. atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
  834. sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
  835. break;
  836. case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
  837. atomic64_inc(&fnic_stats->io_stats.io_not_found);
  838. sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
  839. break;
  840. case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
  841. atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
  842. sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
  843. break;
  844. case FCPIO_FW_ERR: /* request was terminated due fw error */
  845. atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
  846. sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
  847. break;
  848. case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
  849. atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
  850. sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
  851. break;
  852. case FCPIO_INVALID_HEADER: /* header contains invalid data */
  853. case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
  854. case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
  855. default:
  856. sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
  857. break;
  858. }
  859. /* Break link with the SCSI command */
  860. fnic_priv(sc)->io_req = NULL;
  861. io_req->sc = NULL;
  862. fnic_priv(sc)->flags |= FNIC_IO_DONE;
  863. fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
  864. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  865. if (hdr_status != FCPIO_SUCCESS) {
  866. atomic64_inc(&fnic_stats->io_stats.io_failures);
  867. shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
  868. fnic_fcpio_status_to_str(hdr_status));
  869. }
  870. fnic_release_ioreq_buf(fnic, io_req, sc);
  871. cmd_trace = ((u64)hdr_status << 56) |
  872. (u64)icmnd_cmpl->scsi_status << 48 |
  873. (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
  874. (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
  875. (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
  876. FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
  877. sc->device->host->host_no, id, sc,
  878. ((u64)icmnd_cmpl->_resvd0[1] << 56 |
  879. (u64)icmnd_cmpl->_resvd0[0] << 48 |
  880. jiffies_to_msecs(jiffies - start_time)),
  881. desc, cmd_trace, fnic_flags_and_state(sc));
  882. if (sc->sc_data_direction == DMA_FROM_DEVICE) {
  883. fnic->lport->host_stats.fcp_input_requests++;
  884. fnic->fcp_input_bytes += xfer_len;
  885. } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
  886. fnic->lport->host_stats.fcp_output_requests++;
  887. fnic->fcp_output_bytes += xfer_len;
  888. } else
  889. fnic->lport->host_stats.fcp_control_requests++;
  890. /* Call SCSI completion function to complete the IO */
  891. scsi_done(sc);
  892. mempool_free(io_req, fnic->io_req_pool);
  893. atomic64_dec(&fnic_stats->io_stats.active_ios);
  894. if (atomic64_read(&fnic->io_cmpl_skip))
  895. atomic64_dec(&fnic->io_cmpl_skip);
  896. else
  897. atomic64_inc(&fnic_stats->io_stats.io_completions);
  898. io_duration_time = jiffies_to_msecs(jiffies) -
  899. jiffies_to_msecs(start_time);
  900. if (io_duration_time <= 10)
  901. atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
  902. else if (io_duration_time <= 100)
  903. atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
  904. else if (io_duration_time <= 500)
  905. atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
  906. else if (io_duration_time <= 5000)
  907. atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
  908. else if (io_duration_time <= 10000)
  909. atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
  910. else if (io_duration_time <= 30000)
  911. atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
  912. else {
  913. atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
  914. if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
  915. atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
  916. }
  917. }
  918. /* fnic_fcpio_itmf_cmpl_handler
  919. * Routine to handle itmf completions
  920. */
  921. static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
  922. struct fcpio_fw_req *desc)
  923. {
  924. u8 type;
  925. u8 hdr_status;
  926. struct fcpio_tag ftag;
  927. u32 id;
  928. struct scsi_cmnd *sc = NULL;
  929. struct fnic_io_req *io_req;
  930. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  931. struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
  932. struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
  933. struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
  934. unsigned long flags;
  935. unsigned long start_time;
  936. unsigned int hwq = cq_index;
  937. unsigned int mqtag;
  938. unsigned int tag;
  939. fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
  940. fcpio_tag_id_dec(&ftag, &id);
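/* Mask off the abort/device-reset flag bits to recover the block-layer tag */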
  941. mqtag = id & FNIC_TAG_MASK;
  942. tag = blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK);
  943. hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK);
  944. if (hwq != cq_index) {
  945. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  946. "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
  947. hwq, mqtag, tag, cq_index);
  948. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  949. "hdr status: %s ITMF completion on the wrong queue\n",
  950. fnic_fcpio_status_to_str(hdr_status));
  951. }
  952. if (tag > fnic->fnic_max_tag_id) {
  953. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  954. "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
  955. hwq, mqtag, tag, cq_index);
  956. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  957. "hdr status: %s Tag out of range\n",
  958. fnic_fcpio_status_to_str(hdr_status));
  959. return;
  960. } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) {
  961. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  962. "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
  963. hwq, mqtag, tag, cq_index);
  964. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  965. "hdr status: %s Tag out of range\n",
  966. fnic_fcpio_status_to_str(hdr_status));
  967. return;
  968. }
  969. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  970. /* If the SC was allocated by sg3utils, the tag_id is
  971. * max_tag_id and the SC is retrieved from the io_req
  972. */
  973. if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
  974. io_req = fnic->sw_copy_wq[hwq].io_req_table[tag];
  975. if (io_req)
  976. sc = io_req->sc;
  977. } else {
  978. sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
  979. }
  980. WARN_ON_ONCE(!sc);
  981. if (!sc) {
  982. atomic64_inc(&fnic_stats->io_stats.sc_null);
  983. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  984. shost_printk(KERN_ERR, fnic->lport->host,
  985. "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
  986. fnic_fcpio_status_to_str(hdr_status), tag);
  987. return;
  988. }
  989. io_req = fnic_priv(sc)->io_req;
  990. WARN_ON_ONCE(!io_req);
  991. if (!io_req) {
  992. atomic64_inc(&fnic_stats->io_stats.ioreq_null);
  993. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  994. fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
  995. shost_printk(KERN_ERR, fnic->lport->host,
  996. "itmf_cmpl io_req is null - "
  997. "hdr status = %s tag = 0x%x sc 0x%p\n",
  998. fnic_fcpio_status_to_str(hdr_status), tag, sc);
  999. return;
  1000. }
  1001. start_time = io_req->start_time;
  1002. if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
  1003. /* Abort and terminate completion of device reset req */
  1004. /* REVISIT : Add asserts about various flags */
  1005. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  1006. "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
  1007. hwq, mqtag, tag,
  1008. fnic_fcpio_status_to_str(hdr_status));
  1009. fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
  1010. fnic_priv(sc)->abts_status = hdr_status;
  1011. fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
  1012. if (io_req->abts_done)
  1013. complete(io_req->abts_done);
  1014. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1015. } else if (id & FNIC_TAG_ABORT) {
  1016. /* Completion of abort cmd */
  1017. shost_printk(KERN_DEBUG, fnic->lport->host,
  1018. "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
  1019. hwq, mqtag, tag,
  1020. fnic_fcpio_status_to_str(hdr_status));
  1021. switch (hdr_status) {
  1022. case FCPIO_SUCCESS:
  1023. break;
  1024. case FCPIO_TIMEOUT:
  1025. if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
  1026. atomic64_inc(&abts_stats->abort_fw_timeouts);
  1027. else
  1028. atomic64_inc(
  1029. &term_stats->terminate_fw_timeouts);
  1030. break;
  1031. case FCPIO_ITMF_REJECTED:
  1032. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  1033. "abort reject recd. id %d\n",
  1034. (int)(id & FNIC_TAG_MASK));
  1035. break;
  1036. case FCPIO_IO_NOT_FOUND:
  1037. if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
  1038. atomic64_inc(&abts_stats->abort_io_not_found);
  1039. else
  1040. atomic64_inc(
  1041. &term_stats->terminate_io_not_found);
  1042. break;
  1043. default:
  1044. if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
  1045. atomic64_inc(&abts_stats->abort_failures);
  1046. else
  1047. atomic64_inc(
  1048. &term_stats->terminate_failures);
  1049. break;
  1050. }
  1051. if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
  1052. /* This is a late completion. Ignore it */
  1053. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1054. return;
  1055. }
  1056. fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
  1057. fnic_priv(sc)->abts_status = hdr_status;
  1058. /* If the status is IO not found consider it as success */
  1059. if (hdr_status == FCPIO_IO_NOT_FOUND)
  1060. fnic_priv(sc)->abts_status = FCPIO_SUCCESS;
  1061. if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
  1062. atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
  1063. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1064. "abts cmpl recd. id %d status %s\n",
  1065. (int)(id & FNIC_TAG_MASK),
  1066. fnic_fcpio_status_to_str(hdr_status));
  1067. /*
  1068. * If scsi_eh thread is blocked waiting for abts to complete,
  1069. * signal completion to it. IO will be cleaned in the thread
  1070. * else clean it in this context
  1071. */
  1072. if (io_req->abts_done) {
  1073. complete(io_req->abts_done);
  1074. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1075. shost_printk(KERN_INFO, fnic->lport->host,
  1076. "hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n",
  1077. hwq, mqtag, tag);
  1078. } else {
  1079. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1080. "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n",
  1081. hwq, mqtag,
  1082. tag, fnic_fcpio_status_to_str(hdr_status));
  1083. fnic_priv(sc)->io_req = NULL;
  1084. sc->result = (DID_ERROR << 16);
  1085. fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
  1086. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1087. fnic_release_ioreq_buf(fnic, io_req, sc);
  1088. mempool_free(io_req, fnic->io_req_pool);
  1089. FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
  1090. sc->device->host->host_no, id,
  1091. sc,
  1092. jiffies_to_msecs(jiffies - start_time),
  1093. desc,
  1094. (((u64)hdr_status << 40) |
  1095. (u64)sc->cmnd[0] << 32 |
  1096. (u64)sc->cmnd[2] << 24 |
  1097. (u64)sc->cmnd[3] << 16 |
  1098. (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
  1099. fnic_flags_and_state(sc));
  1100. scsi_done(sc);
  1101. atomic64_dec(&fnic_stats->io_stats.active_ios);
  1102. if (atomic64_read(&fnic->io_cmpl_skip))
  1103. atomic64_dec(&fnic->io_cmpl_skip);
  1104. else
  1105. atomic64_inc(&fnic_stats->io_stats.io_completions);
  1106. }
  1107. } else if (id & FNIC_TAG_DEV_RST) {
  1108. /* Completion of device reset */
  1109. shost_printk(KERN_INFO, fnic->lport->host,
  1110. "hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n",
  1111. hwq, mqtag,
  1112. tag, fnic_fcpio_status_to_str(hdr_status));
  1113. fnic_priv(sc)->lr_status = hdr_status;
  1114. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
  1115. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1116. fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
  1117. FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
  1118. sc->device->host->host_no, id, sc,
  1119. jiffies_to_msecs(jiffies - start_time),
  1120. desc, 0, fnic_flags_and_state(sc));
  1121. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1122. "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n",
  1123. hwq, mqtag,
  1124. tag, fnic_fcpio_status_to_str(hdr_status));
  1125. return;
  1126. }
  1127. if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
  1128. /* Need to wait for terminate completion */
  1129. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1130. FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
  1131. sc->device->host->host_no, id, sc,
  1132. jiffies_to_msecs(jiffies - start_time),
  1133. desc, 0, fnic_flags_and_state(sc));
  1134. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1135. "dev reset cmpl recd after time out. "
  1136. "id %d status %s\n",
  1137. (int)(id & FNIC_TAG_MASK),
  1138. fnic_fcpio_status_to_str(hdr_status));
  1139. return;
  1140. }
  1141. fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
  1142. fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
  1143. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  1144. "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n",
  1145. hwq, mqtag,
  1146. tag, fnic_fcpio_status_to_str(hdr_status));
  1147. if (io_req->dr_done)
  1148. complete(io_req->dr_done);
  1149. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1150. } else {
  1151. shost_printk(KERN_ERR, fnic->lport->host,
  1152. "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n",
  1153. __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
  1154. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1155. }
  1156. }
  1157. /*
  1158. * fnic_fcpio_cmpl_handler
  1159. * Routine to service the cq for wq_copy
  1160. */
  1161. static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
  1162. unsigned int cq_index,
  1163. struct fcpio_fw_req *desc)
  1164. {
  1165. struct fnic *fnic = vnic_dev_priv(vdev);
  1166. switch (desc->hdr.type) {
  1167. case FCPIO_ICMND_CMPL: /* fw completed a command */
  1168. case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
  1169. case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
  1170. case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
  1171. case FCPIO_RESET_CMPL: /* fw completed reset */
  1172. atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
  1173. break;
  1174. default:
  1175. break;
  1176. }
  1177. cq_index -= fnic->copy_wq_base;
  1178. switch (desc->hdr.type) {
  1179. case FCPIO_ACK: /* fw copied copy wq desc to its queue */
  1180. fnic_fcpio_ack_handler(fnic, cq_index, desc);
  1181. break;
  1182. case FCPIO_ICMND_CMPL: /* fw completed a command */
  1183. fnic_fcpio_icmnd_cmpl_handler(fnic, cq_index, desc);
  1184. break;
  1185. case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
  1186. fnic_fcpio_itmf_cmpl_handler(fnic, cq_index, desc);
  1187. break;
  1188. case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
  1189. case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
  1190. fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
  1191. break;
  1192. case FCPIO_RESET_CMPL: /* fw completed reset */
  1193. fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
  1194. break;
  1195. default:
  1196. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1197. "firmware completion type %d\n",
  1198. desc->hdr.type);
  1199. break;
  1200. }
  1201. return 0;
  1202. }
  1203. /*
  1204. * fnic_wq_copy_cmpl_handler
1205. * Routine to process wq_copy completions
  1206. */
  1207. int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index)
  1208. {
  1209. unsigned int cur_work_done;
  1210. struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
  1211. u64 start_jiffies = 0;
  1212. u64 end_jiffies = 0;
  1213. u64 delta_jiffies = 0;
  1214. u64 delta_ms = 0;
  1215. start_jiffies = jiffies;
  1216. cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
  1217. fnic_fcpio_cmpl_handler,
  1218. copy_work_to_do);
  1219. end_jiffies = jiffies;
  1220. delta_jiffies = end_jiffies - start_jiffies;
  1221. if (delta_jiffies > (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
  1222. atomic64_set(&misc_stats->max_isr_jiffies, delta_jiffies);
  1223. delta_ms = jiffies_to_msecs(delta_jiffies);
  1224. atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
  1225. atomic64_set(&misc_stats->corr_work_done, cur_work_done);
  1226. }
  1227. return cur_work_done;
  1228. }
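/*
 * fnic_cleanup_io_iter
 * Per-command iterator used by fnic_cleanup_io(): detaches the io_req
 * from the command, wakes up any thread waiting on a device reset or
 * abort for it, or else frees the io_req and completes the command
 * with DID_TRANSPORT_DISRUPTED.
 */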
  1229. static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
  1230. {
  1231. struct request *const rq = scsi_cmd_to_rq(sc);
  1232. struct fnic *fnic = data;
  1233. struct fnic_io_req *io_req;
  1234. unsigned long flags = 0;
  1235. unsigned long start_time = 0;
  1236. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  1237. uint16_t hwq = 0;
  1238. int tag;
  1239. int mqtag;
  1240. mqtag = blk_mq_unique_tag(rq);
  1241. hwq = blk_mq_unique_tag_to_hwq(mqtag);
  1242. tag = blk_mq_unique_tag_to_tag(mqtag);
  1243. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1244. fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
  1245. io_req = fnic_priv(sc)->io_req;
  1246. if (!io_req) {
  1247. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1248. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  1249. "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n",
  1250. hwq, mqtag, tag, fnic_priv(sc)->flags);
  1251. return true;
  1252. }
  1253. if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
  1254. !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
  1255. /*
  1256. * We will be here only when FW completes reset
  1257. * without sending completions for outstanding ios.
  1258. */
  1259. fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
  1260. if (io_req && io_req->dr_done)
  1261. complete(io_req->dr_done);
  1262. else if (io_req && io_req->abts_done)
  1263. complete(io_req->abts_done);
  1264. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1265. return true;
  1266. } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
  1267. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1268. return true;
  1269. }
  1270. fnic_priv(sc)->io_req = NULL;
  1271. io_req->sc = NULL;
  1272. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1273. /*
  1274. * If there is a scsi_cmnd associated with this io_req, then
  1275. * free the corresponding state
  1276. */
  1277. start_time = io_req->start_time;
  1278. fnic_release_ioreq_buf(fnic, io_req, sc);
  1279. mempool_free(io_req, fnic->io_req_pool);
  1280. sc->result = DID_TRANSPORT_DISRUPTED << 16;
  1281. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  1282. "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
  1283. mqtag, tag, sc, (jiffies - start_time));
  1284. if (atomic64_read(&fnic->io_cmpl_skip))
  1285. atomic64_dec(&fnic->io_cmpl_skip);
  1286. else
  1287. atomic64_inc(&fnic_stats->io_stats.io_completions);
  1288. FNIC_TRACE(fnic_cleanup_io,
  1289. sc->device->host->host_no, tag, sc,
  1290. jiffies_to_msecs(jiffies - start_time),
  1291. 0, ((u64)sc->cmnd[0] << 32 |
  1292. (u64)sc->cmnd[2] << 24 |
  1293. (u64)sc->cmnd[3] << 16 |
  1294. (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
  1295. fnic_flags_and_state(sc));
  1296. scsi_done(sc);
  1297. return true;
  1298. }
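/*
 * fnic_cleanup_io
 * Clean up all outstanding IOs by running fnic_cleanup_io_iter() over
 * every busy command on the host.
 */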
  1299. static void fnic_cleanup_io(struct fnic *fnic)
  1300. {
  1301. scsi_host_busy_iter(fnic->lport->host,
  1302. fnic_cleanup_io_iter, fnic);
  1303. }
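/*
 * fnic_wq_copy_cleanup_handler
 * Given a posted copy WQ descriptor, look up the associated scsi_cmnd
 * by tag, free its io_req if one is still attached, and complete the
 * command with DID_NO_CONNECT.
 */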
  1304. void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
  1305. struct fcpio_host_req *desc)
  1306. {
  1307. u32 id;
  1308. struct fnic *fnic = vnic_dev_priv(wq->vdev);
  1309. struct fnic_io_req *io_req;
  1310. struct scsi_cmnd *sc;
  1311. unsigned long flags;
  1312. unsigned long start_time = 0;
  1313. uint16_t hwq;
  1314. /* get the tag reference */
  1315. fcpio_tag_id_dec(&desc->hdr.tag, &id);
  1316. id &= FNIC_TAG_MASK;
  1317. if (id >= fnic->fnic_max_tag_id)
  1318. return;
  1319. sc = scsi_host_find_tag(fnic->lport->host, id);
  1320. if (!sc)
  1321. return;
  1322. hwq = blk_mq_unique_tag_to_hwq(id);
  1323. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1324. /* Get the IO context which this desc refers to */
  1325. io_req = fnic_priv(sc)->io_req;
  1326. /* fnic interrupts are turned off by now */
  1327. if (!io_req) {
  1328. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1329. goto wq_copy_cleanup_scsi_cmd;
  1330. }
  1331. fnic_priv(sc)->io_req = NULL;
  1332. io_req->sc = NULL;
  1333. fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL;
  1334. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1335. start_time = io_req->start_time;
  1336. fnic_release_ioreq_buf(fnic, io_req, sc);
  1337. mempool_free(io_req, fnic->io_req_pool);
  1338. wq_copy_cleanup_scsi_cmd:
  1339. sc->result = DID_NO_CONNECT << 16;
1340. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
1341. "wq_copy_cleanup_handler: DID_NO_CONNECT\n");
  1342. FNIC_TRACE(fnic_wq_copy_cleanup_handler,
  1343. sc->device->host->host_no, id, sc,
  1344. jiffies_to_msecs(jiffies - start_time),
  1345. 0, ((u64)sc->cmnd[0] << 32 |
  1346. (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
  1347. (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
  1348. fnic_flags_and_state(sc));
  1349. scsi_done(sc);
  1350. }
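/*
 * fnic_queue_abort_io_req
 * Queue an abort/terminate (ITMF) request for the given tag to the
 * firmware on the copy WQ for @hwq.
 * Returns 0 on success, 1 if IO is blocked or no WQ descriptor is
 * available.
 */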
  1351. static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
  1352. u32 task_req, u8 *fc_lun,
  1353. struct fnic_io_req *io_req,
  1354. unsigned int hwq)
  1355. {
  1356. struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq];
  1357. struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
  1358. unsigned long flags;
  1359. spin_lock_irqsave(&fnic->fnic_lock, flags);
  1360. if (unlikely(fnic_chk_state_flags_locked(fnic,
  1361. FNIC_FLAGS_IO_BLOCKED))) {
  1362. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1363. return 1;
  1364. } else
  1365. atomic_inc(&fnic->in_flight);
  1366. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1367. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1368. if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
  1369. free_wq_copy_descs(fnic, wq, hwq);
  1370. if (!vnic_wq_copy_desc_avail(wq)) {
  1371. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1372. atomic_dec(&fnic->in_flight);
  1373. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1374. "fnic_queue_abort_io_req: failure: no descriptors\n");
  1375. atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
  1376. return 1;
  1377. }
  1378. fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
  1379. 0, task_req, tag, fc_lun, io_req->port_id,
  1380. fnic->config.ra_tov, fnic->config.ed_tov);
  1381. atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
  1382. if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
  1383. atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
  1384. atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
  1385. atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
  1386. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1387. atomic_dec(&fnic->in_flight);
  1388. return 0;
  1389. }
  1390. struct fnic_rport_abort_io_iter_data {
  1391. struct fnic *fnic;
  1392. u32 port_id;
  1393. int term_cnt;
  1394. };
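/*
 * fnic_rport_abort_io_iter
 * Per-command iterator used by fnic_rport_exch_reset(): for each IO
 * still pending with the firmware that belongs to the rport that went
 * away, mark it ABTS_PENDING and queue a terminate to the firmware.
 */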
  1395. static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
  1396. {
  1397. struct request *const rq = scsi_cmd_to_rq(sc);
  1398. struct fnic_rport_abort_io_iter_data *iter_data = data;
  1399. struct fnic *fnic = iter_data->fnic;
  1400. int abt_tag = 0;
  1401. struct fnic_io_req *io_req;
  1402. unsigned long flags;
  1403. struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
  1404. struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
  1405. struct scsi_lun fc_lun;
  1406. enum fnic_ioreq_state old_ioreq_state;
  1407. uint16_t hwq = 0;
  1408. abt_tag = blk_mq_unique_tag(rq);
  1409. hwq = blk_mq_unique_tag_to_hwq(abt_tag);
  1410. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1411. io_req = fnic_priv(sc)->io_req;
  1412. if (!io_req || io_req->port_id != iter_data->port_id) {
  1413. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1414. return true;
  1415. }
  1416. if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
  1417. !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
  1418. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  1419. "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
  1420. hwq, abt_tag, fnic_priv(sc)->flags);
  1421. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1422. return true;
  1423. }
  1424. /*
  1425. * Found IO that is still pending with firmware and
  1426. * belongs to rport that went away
  1427. */
  1428. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
  1429. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1430. return true;
  1431. }
  1432. if (io_req->abts_done) {
  1433. shost_printk(KERN_ERR, fnic->lport->host,
  1434. "fnic_rport_exch_reset: io_req->abts_done is set "
  1435. "state is %s\n",
  1436. fnic_ioreq_state_to_str(fnic_priv(sc)->state));
  1437. }
  1438. if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
  1439. shost_printk(KERN_ERR, fnic->lport->host,
  1440. "rport_exch_reset "
  1441. "IO not yet issued %p tag 0x%x flags "
  1442. "%x state %d\n",
  1443. sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
  1444. }
  1445. old_ioreq_state = fnic_priv(sc)->state;
  1446. fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
  1447. fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
  1448. if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
  1449. atomic64_inc(&reset_stats->device_reset_terminates);
  1450. abt_tag |= FNIC_TAG_DEV_RST;
  1451. }
  1452. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1453. "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
  1454. BUG_ON(io_req->abts_done);
  1455. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1456. "fnic_rport_reset_exch: Issuing abts\n");
  1457. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1458. /* Now queue the abort command to firmware */
  1459. int_to_scsilun(sc->device->lun, &fc_lun);
  1460. if (fnic_queue_abort_io_req(fnic, abt_tag,
  1461. FCPIO_ITMF_ABT_TASK_TERM,
  1462. fc_lun.scsi_lun, io_req, hwq)) {
  1463. /*
  1464. * Revert the cmd state back to old state, if
  1465. * it hasn't changed in between. This cmd will get
  1466. * aborted later by scsi_eh, or cleaned up during
  1467. * lun reset
  1468. */
  1469. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1470. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  1471. "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
  1472. hwq, abt_tag, fnic_priv(sc)->flags);
  1473. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
  1474. fnic_priv(sc)->state = old_ioreq_state;
  1475. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1476. } else {
  1477. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1478. if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
  1479. fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
  1480. else
  1481. fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
  1482. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1483. atomic64_inc(&term_stats->terminates);
  1484. iter_data->term_cnt++;
  1485. }
  1486. return true;
  1487. }
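/*
 * fnic_rport_exch_reset
 * Walk all busy commands and terminate the IOs outstanding to the
 * remote port identified by @port_id, updating terminate statistics.
 */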
  1488. static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
  1489. {
  1490. struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
  1491. struct fnic_rport_abort_io_iter_data iter_data = {
  1492. .fnic = fnic,
  1493. .port_id = port_id,
  1494. .term_cnt = 0,
  1495. };
  1496. FNIC_SCSI_DBG(KERN_DEBUG,
  1497. fnic->lport->host, fnic->fnic_num,
  1498. "fnic_rport_exch_reset called portid 0x%06x\n",
  1499. port_id);
  1500. if (fnic->in_remove)
  1501. return;
  1502. scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
  1503. &iter_data);
  1504. if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
  1505. atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
  1506. }
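/*
 * fnic_terminate_rport_io
 * Terminate all IOs outstanding to the given remote port by resetting
 * the exchanges for its port_id via fnic_rport_exch_reset().
 */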
  1507. void fnic_terminate_rport_io(struct fc_rport *rport)
  1508. {
  1509. struct fc_rport_libfc_priv *rdata;
  1510. struct fc_lport *lport;
  1511. struct fnic *fnic;
  1512. if (!rport) {
  1513. printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
  1514. return;
  1515. }
  1516. rdata = rport->dd_data;
  1517. if (!rdata) {
  1518. printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
  1519. return;
  1520. }
  1521. lport = rdata->local_port;
  1522. if (!lport) {
  1523. printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
  1524. return;
  1525. }
  1526. fnic = lport_priv(lport);
  1527. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1528. "wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
  1529. rport->port_name, rport->node_name, rport,
  1530. rport->port_id);
  1531. if (fnic->in_remove)
  1532. return;
  1533. fnic_rport_exch_reset(fnic, rport->port_id);
  1534. }
  1535. /*
  1536. * This function is exported to SCSI for sending abort cmnds.
1537. * A SCSI IO is represented by an io_req in the driver.
1538. * The io_req is linked to the SCSI Cmd, and thus to the ULP's IO.
  1539. */
  1540. int fnic_abort_cmd(struct scsi_cmnd *sc)
  1541. {
  1542. struct request *const rq = scsi_cmd_to_rq(sc);
  1543. struct fc_lport *lp;
  1544. struct fnic *fnic;
  1545. struct fnic_io_req *io_req = NULL;
  1546. struct fc_rport *rport;
  1547. unsigned long flags;
  1548. unsigned long start_time = 0;
  1549. int ret = SUCCESS;
  1550. u32 task_req = 0;
  1551. struct scsi_lun fc_lun;
  1552. struct fnic_stats *fnic_stats;
  1553. struct abort_stats *abts_stats;
  1554. struct terminate_stats *term_stats;
  1555. enum fnic_ioreq_state old_ioreq_state;
  1556. int mqtag;
  1557. unsigned long abt_issued_time;
  1558. uint16_t hwq = 0;
  1559. DECLARE_COMPLETION_ONSTACK(tm_done);
  1560. /* Wait for rport to unblock */
  1561. fc_block_scsi_eh(sc);
  1562. /* Get local-port, check ready and link up */
  1563. lp = shost_priv(sc->device->host);
  1564. fnic = lport_priv(lp);
  1565. spin_lock_irqsave(&fnic->fnic_lock, flags);
  1566. fnic_stats = &fnic->fnic_stats;
  1567. abts_stats = &fnic->fnic_stats.abts_stats;
  1568. term_stats = &fnic->fnic_stats.term_stats;
  1569. rport = starget_to_rport(scsi_target(sc->device));
  1570. mqtag = blk_mq_unique_tag(rq);
  1571. hwq = blk_mq_unique_tag_to_hwq(mqtag);
  1572. fnic_priv(sc)->flags = FNIC_NO_FLAGS;
  1573. if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
  1574. ret = FAILED;
  1575. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1576. goto fnic_abort_cmd_end;
  1577. }
  1578. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1579. /*
  1580. * Avoid a race between SCSI issuing the abort and the device
  1581. * completing the command.
  1582. *
  1583. * If the command is already completed by the fw cmpl code,
  1584. * we just return SUCCESS from here. This means that the abort
1585. * succeeded. In the SCSI ML, since the timeout for the command has
1586. * already fired, the completion won't actually complete the command
1587. * and it will be treated as an aborted command.
  1588. *
  1589. * .io_req will not be cleared except while holding io_req_lock.
  1590. */
  1591. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1592. io_req = fnic_priv(sc)->io_req;
  1593. if (!io_req) {
  1594. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1595. goto fnic_abort_cmd_end;
  1596. }
  1597. io_req->abts_done = &tm_done;
  1598. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
  1599. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1600. goto wait_pending;
  1601. }
  1602. abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
  1603. if (abt_issued_time <= 6000)
  1604. atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
  1605. else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
  1606. atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
  1607. else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
  1608. atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
  1609. else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
  1610. atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
  1611. else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
  1612. atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
  1613. else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
  1614. atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
  1615. else
  1616. atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
  1617. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1618. "CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
  1619. sc->cmnd[0], abt_issued_time);
  1620. /*
1621. * Command is still pending; we need to abort it.
1622. * If the firmware completes the command after this point,
1623. * the completion won't be passed up to the mid-layer, since
1624. * the abort has already started.
  1625. */
  1626. old_ioreq_state = fnic_priv(sc)->state;
  1627. fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
  1628. fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
  1629. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1630. /*
  1631. * Check readiness of the remote port. If the path to remote
  1632. * port is up, then send abts to the remote port to terminate
  1633. * the IO. Else, just locally terminate the IO in the firmware
  1634. */
  1635. if (fc_remote_port_chkready(rport) == 0)
  1636. task_req = FCPIO_ITMF_ABT_TASK;
  1637. else {
  1638. atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
  1639. task_req = FCPIO_ITMF_ABT_TASK_TERM;
  1640. }
  1641. /* Now queue the abort command to firmware */
  1642. int_to_scsilun(sc->device->lun, &fc_lun);
  1643. if (fnic_queue_abort_io_req(fnic, mqtag, task_req, fc_lun.scsi_lun,
  1644. io_req, hwq)) {
  1645. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1646. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
  1647. fnic_priv(sc)->state = old_ioreq_state;
  1648. io_req = fnic_priv(sc)->io_req;
  1649. if (io_req)
  1650. io_req->abts_done = NULL;
  1651. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1652. ret = FAILED;
  1653. goto fnic_abort_cmd_end;
  1654. }
  1655. if (task_req == FCPIO_ITMF_ABT_TASK) {
  1656. fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
  1657. atomic64_inc(&fnic_stats->abts_stats.aborts);
  1658. } else {
  1659. fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
  1660. atomic64_inc(&fnic_stats->term_stats.terminates);
  1661. }
  1662. /*
  1663. * We queued an abort IO, wait for its completion.
  1664. * Once the firmware completes the abort command, it will
  1665. * wake up this thread.
  1666. */
  1667. wait_pending:
  1668. wait_for_completion_timeout(&tm_done,
  1669. msecs_to_jiffies
  1670. (2 * fnic->config.ra_tov +
  1671. fnic->config.ed_tov));
  1672. /* Check the abort status */
  1673. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1674. io_req = fnic_priv(sc)->io_req;
  1675. if (!io_req) {
  1676. atomic64_inc(&fnic_stats->io_stats.ioreq_null);
  1677. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1678. fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
  1679. ret = FAILED;
  1680. goto fnic_abort_cmd_end;
  1681. }
  1682. io_req->abts_done = NULL;
  1683. /* fw did not complete abort, timed out */
  1684. if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
  1685. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1686. if (task_req == FCPIO_ITMF_ABT_TASK) {
  1687. atomic64_inc(&abts_stats->abort_drv_timeouts);
  1688. } else {
  1689. atomic64_inc(&term_stats->terminate_drv_timeouts);
  1690. }
  1691. fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
  1692. ret = FAILED;
  1693. goto fnic_abort_cmd_end;
  1694. }
  1695. /* IO out of order */
  1696. if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
  1697. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1698. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  1699. "Issuing host reset due to out of order IO\n");
  1700. ret = FAILED;
  1701. goto fnic_abort_cmd_end;
  1702. }
  1703. fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
  1704. start_time = io_req->start_time;
  1705. /*
  1706. * firmware completed the abort, check the status,
  1707. * free the io_req if successful. If abort fails,
  1708. * Device reset will clean the I/O.
  1709. */
  1710. if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS ||
  1711. (fnic_priv(sc)->abts_status == FCPIO_ABORTED)) {
  1712. fnic_priv(sc)->io_req = NULL;
  1713. io_req->sc = NULL;
  1714. } else {
  1715. ret = FAILED;
  1716. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1717. goto fnic_abort_cmd_end;
  1718. }
  1719. fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
  1720. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1721. fnic_release_ioreq_buf(fnic, io_req, sc);
  1722. mempool_free(io_req, fnic->io_req_pool);
  1723. /* Call SCSI completion function to complete the IO */
  1724. sc->result = DID_ABORT << 16;
  1725. scsi_done(sc);
  1726. atomic64_dec(&fnic_stats->io_stats.active_ios);
  1727. if (atomic64_read(&fnic->io_cmpl_skip))
  1728. atomic64_dec(&fnic->io_cmpl_skip);
  1729. else
  1730. atomic64_inc(&fnic_stats->io_stats.io_completions);
  1731. fnic_abort_cmd_end:
  1732. FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, mqtag, sc,
  1733. jiffies_to_msecs(jiffies - start_time),
  1734. 0, ((u64)sc->cmnd[0] << 32 |
  1735. (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
  1736. (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
  1737. fnic_flags_and_state(sc));
  1738. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1739. "Returning from abort cmd type %x %s\n", task_req,
  1740. (ret == SUCCESS) ?
  1741. "SUCCESS" : "FAILED");
  1742. return ret;
  1743. }
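/*
 * fnic_queue_dr_io_req
 * Queue a LUN reset (ITMF) request for @sc to the firmware on the copy
 * WQ selected by the io_req's tag.
 * Returns 0 on success, FAILED if IO is blocked, or -EAGAIN if no WQ
 * descriptor is available.
 */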
  1744. static inline int fnic_queue_dr_io_req(struct fnic *fnic,
  1745. struct scsi_cmnd *sc,
  1746. struct fnic_io_req *io_req)
  1747. {
  1748. struct vnic_wq_copy *wq;
  1749. struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
  1750. struct scsi_lun fc_lun;
  1751. int ret = 0;
  1752. unsigned long flags;
  1753. uint16_t hwq = 0;
  1754. uint32_t tag = 0;
  1755. tag = io_req->tag;
  1756. hwq = blk_mq_unique_tag_to_hwq(tag);
  1757. wq = &fnic->hw_copy_wq[hwq];
  1758. spin_lock_irqsave(&fnic->fnic_lock, flags);
  1759. if (unlikely(fnic_chk_state_flags_locked(fnic,
  1760. FNIC_FLAGS_IO_BLOCKED))) {
  1761. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1762. return FAILED;
  1763. } else
  1764. atomic_inc(&fnic->in_flight);
  1765. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  1766. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1767. if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
  1768. free_wq_copy_descs(fnic, wq, hwq);
  1769. if (!vnic_wq_copy_desc_avail(wq)) {
  1770. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1771. "queue_dr_io_req failure - no descriptors\n");
  1772. atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
  1773. ret = -EAGAIN;
  1774. goto lr_io_req_end;
  1775. }
  1776. /* fill in the lun info */
  1777. int_to_scsilun(sc->device->lun, &fc_lun);
  1778. tag |= FNIC_TAG_DEV_RST;
  1779. fnic_queue_wq_copy_desc_itmf(wq, tag,
  1780. 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
  1781. fc_lun.scsi_lun, io_req->port_id,
  1782. fnic->config.ra_tov, fnic->config.ed_tov);
  1783. atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
  1784. if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
  1785. atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
  1786. atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
  1787. atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
  1788. lr_io_req_end:
  1789. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1790. atomic_dec(&fnic->in_flight);
  1791. return ret;
  1792. }
  1793. struct fnic_pending_aborts_iter_data {
  1794. struct fnic *fnic;
  1795. struct scsi_cmnd *lr_sc;
  1796. struct scsi_device *lun_dev;
  1797. int ret;
  1798. };
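/*
 * fnic_pending_aborts_iter
 * Per-command iterator used by fnic_clean_pending_aborts(): for each IO
 * on the LUN being reset that is still pending with the firmware, issue
 * a terminate, wait for it to complete, and return the command to the
 * upper layer with DID_RESET. Sets iter_data->ret to FAILED if the
 * terminate cannot be queued or does not complete.
 */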
  1799. static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
  1800. {
  1801. struct request *const rq = scsi_cmd_to_rq(sc);
  1802. struct fnic_pending_aborts_iter_data *iter_data = data;
  1803. struct fnic *fnic = iter_data->fnic;
  1804. struct scsi_device *lun_dev = iter_data->lun_dev;
  1805. unsigned long abt_tag = 0;
  1806. uint16_t hwq = 0;
  1807. struct fnic_io_req *io_req;
  1808. unsigned long flags;
  1809. struct scsi_lun fc_lun;
  1810. DECLARE_COMPLETION_ONSTACK(tm_done);
  1811. enum fnic_ioreq_state old_ioreq_state;
  1812. if (sc == iter_data->lr_sc || sc->device != lun_dev)
  1813. return true;
  1814. abt_tag = blk_mq_unique_tag(rq);
  1815. hwq = blk_mq_unique_tag_to_hwq(abt_tag);
  1816. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1817. io_req = fnic_priv(sc)->io_req;
  1818. if (!io_req) {
  1819. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1820. return true;
  1821. }
  1822. /*
  1823. * Found IO that is still pending with firmware and
  1824. * belongs to the LUN that we are resetting
  1825. */
  1826. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1827. "Found IO in %s on lun\n",
  1828. fnic_ioreq_state_to_str(fnic_priv(sc)->state));
  1829. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
  1830. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1831. return true;
  1832. }
  1833. if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
  1834. (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
  1835. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  1836. "dev rst not pending sc 0x%p\n", sc);
  1837. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1838. return true;
  1839. }
  1840. if (io_req->abts_done)
  1841. shost_printk(KERN_ERR, fnic->lport->host,
  1842. "%s: io_req->abts_done is set state is %s\n",
  1843. __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
  1844. old_ioreq_state = fnic_priv(sc)->state;
  1845. /*
1846. * Any pending IO issued prior to the reset is expected to be
1847. * in abts pending state; if not, set FNIC_IOREQ_ABTS_PENDING
1848. * to indicate that the IO is abort pending.
1849. * When the IO completes, it will be handed over and
1850. * handled in this function.
  1851. */
  1852. fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
  1853. BUG_ON(io_req->abts_done);
  1854. if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
  1855. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  1856. "dev rst sc 0x%p\n", sc);
  1857. }
  1858. fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
  1859. io_req->abts_done = &tm_done;
  1860. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1861. /* Now queue the abort command to firmware */
  1862. int_to_scsilun(sc->device->lun, &fc_lun);
  1863. if (fnic_queue_abort_io_req(fnic, abt_tag,
  1864. FCPIO_ITMF_ABT_TASK_TERM,
  1865. fc_lun.scsi_lun, io_req, hwq)) {
  1866. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1867. io_req = fnic_priv(sc)->io_req;
  1868. if (io_req)
  1869. io_req->abts_done = NULL;
  1870. if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
  1871. fnic_priv(sc)->state = old_ioreq_state;
  1872. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1873. iter_data->ret = FAILED;
  1874. FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
  1875. "hwq: %d abt_tag: 0x%lx Abort could not be queued\n",
  1876. hwq, abt_tag);
  1877. return false;
  1878. } else {
  1879. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1880. if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
  1881. fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
  1882. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1883. }
  1884. fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
  1885. wait_for_completion_timeout(&tm_done, msecs_to_jiffies
  1886. (fnic->config.ed_tov));
  1887. /* Recheck cmd state to check if it is now aborted */
  1888. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  1889. io_req = fnic_priv(sc)->io_req;
  1890. if (!io_req) {
  1891. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1892. fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
  1893. return true;
  1894. }
  1895. io_req->abts_done = NULL;
  1896. /* if abort is still pending with fw, fail */
  1897. if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
  1898. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1899. fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
  1900. iter_data->ret = FAILED;
  1901. return false;
  1902. }
  1903. fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
  1904. /* original sc used for lr is handled by dev reset code */
  1905. if (sc != iter_data->lr_sc) {
  1906. fnic_priv(sc)->io_req = NULL;
  1907. fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL;
  1908. }
  1909. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  1910. /* original sc used for lr is handled by dev reset code */
  1911. if (sc != iter_data->lr_sc) {
  1912. fnic_release_ioreq_buf(fnic, io_req, sc);
  1913. mempool_free(io_req, fnic->io_req_pool);
  1914. }
  1915. /*
1916. * Any IO returned during the reset needs to call scsi_done
1917. * to return the scsi_cmnd to the upper layer.
  1918. */
  1919. /* Set result to let upper SCSI layer retry */
  1920. sc->result = DID_RESET << 16;
  1921. scsi_done(sc);
  1922. return true;
  1923. }
  1924. /*
1925. * Clean up any pending aborts on the lun.
1926. * For each outstanding IO on this lun whose abort has not been completed
1927. * by the firmware, issue a local abort and wait for it to complete.
1928. * Return 0 if all commands were successfully aborted, 1 otherwise.
  1929. */
  1930. static int fnic_clean_pending_aborts(struct fnic *fnic,
  1931. struct scsi_cmnd *lr_sc,
  1932. bool new_sc)
  1933. {
  1934. int ret = 0;
  1935. struct fnic_pending_aborts_iter_data iter_data = {
  1936. .fnic = fnic,
  1937. .lun_dev = lr_sc->device,
  1938. .ret = SUCCESS,
  1939. };
  1940. iter_data.lr_sc = lr_sc;
  1941. scsi_host_busy_iter(fnic->lport->host,
  1942. fnic_pending_aborts_iter, &iter_data);
  1943. if (iter_data.ret == FAILED) {
  1944. ret = iter_data.ret;
  1945. goto clean_pending_aborts_end;
  1946. }
  1947. schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
  1948. /* walk again to check, if IOs are still pending in fw */
  1949. if (fnic_is_abts_pending(fnic, lr_sc))
  1950. ret = 1;
  1951. clean_pending_aborts_end:
  1952. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  1953. "exit status: %d\n", ret);
  1954. return ret;
  1955. }
  1956. /*
1957. * The SCSI EH thread issues a LUN reset when one or more commands on a
1958. * LUN fail to get aborted. It calls the driver's eh_device_reset with a
1959. * SCSI command on the LUN.
  1960. */
  1961. int fnic_device_reset(struct scsi_cmnd *sc)
  1962. {
  1963. struct request *rq = scsi_cmd_to_rq(sc);
  1964. struct fc_lport *lp;
  1965. struct fnic *fnic;
  1966. struct fnic_io_req *io_req = NULL;
  1967. struct fc_rport *rport;
  1968. int status;
  1969. int ret = FAILED;
  1970. unsigned long flags;
  1971. unsigned long start_time = 0;
  1972. struct scsi_lun fc_lun;
  1973. struct fnic_stats *fnic_stats;
  1974. struct reset_stats *reset_stats;
  1975. int mqtag = rq->tag;
  1976. DECLARE_COMPLETION_ONSTACK(tm_done);
1977. bool new_sc = false;
  1978. uint16_t hwq = 0;
  1979. /* Wait for rport to unblock */
  1980. fc_block_scsi_eh(sc);
  1981. /* Get local-port, check ready and link up */
  1982. lp = shost_priv(sc->device->host);
  1983. fnic = lport_priv(lp);
  1984. fnic_stats = &fnic->fnic_stats;
  1985. reset_stats = &fnic->fnic_stats.reset_stats;
  1986. atomic64_inc(&reset_stats->device_resets);
  1987. rport = starget_to_rport(scsi_target(sc->device));
  1988. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  1989. "fcid: 0x%x lun: 0x%llx hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
  1990. rport->port_id, sc->device->lun, hwq, mqtag,
  1991. fnic_priv(sc)->flags);
  1992. if (lp->state != LPORT_ST_READY || !(lp->link_up))
  1993. goto fnic_device_reset_end;
  1994. /* Check if remote port up */
  1995. if (fc_remote_port_chkready(rport)) {
  1996. atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
  1997. goto fnic_device_reset_end;
  1998. }
  1999. fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
  2000. if (unlikely(mqtag < 0)) {
  2001. /*
2002. * For a device reset issued through sg3utils, we let
2003. * only one LUN_RESET go through and use a special
2004. * tag equal to max_tag_id so that we don't have to allocate
2005. * or free it. It won't interact with tags
2006. * allocated by the mid-layer.
  2007. */
  2008. mutex_lock(&fnic->sgreset_mutex);
  2009. mqtag = fnic->fnic_max_tag_id;
2010. new_sc = true;
  2011. } else {
  2012. mqtag = blk_mq_unique_tag(rq);
  2013. hwq = blk_mq_unique_tag_to_hwq(mqtag);
  2014. }
  2015. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2016. io_req = fnic_priv(sc)->io_req;
  2017. /*
  2018. * If there is a io_req attached to this command, then use it,
  2019. * else allocate a new one.
  2020. */
  2021. if (!io_req) {
  2022. io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
  2023. if (!io_req) {
  2024. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2025. goto fnic_device_reset_end;
  2026. }
  2027. memset(io_req, 0, sizeof(*io_req));
  2028. io_req->port_id = rport->port_id;
  2029. io_req->tag = mqtag;
  2030. fnic_priv(sc)->io_req = io_req;
  2031. io_req->sc = sc;
  2032. if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
  2033. WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n",
  2034. fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag));
  2035. fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] =
  2036. io_req;
  2037. }
  2038. io_req->dr_done = &tm_done;
  2039. fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
  2040. fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
  2041. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2042. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag);
  2043. /*
  2044. * issue the device reset, if enqueue failed, clean up the ioreq
  2045. * and break assoc with scsi cmd
  2046. */
  2047. if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
  2048. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2049. io_req = fnic_priv(sc)->io_req;
  2050. if (io_req)
  2051. io_req->dr_done = NULL;
  2052. goto fnic_device_reset_clean;
  2053. }
  2054. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2055. fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
  2056. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2057. /*
  2058. * Wait on the local completion for LUN reset. The io_req may be
  2059. * freed while we wait since we hold no lock.
  2060. */
  2061. wait_for_completion_timeout(&tm_done,
  2062. msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
  2063. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2064. io_req = fnic_priv(sc)->io_req;
  2065. if (!io_req) {
  2066. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2067. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  2068. "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
  2069. goto fnic_device_reset_end;
  2070. }
  2071. io_req->dr_done = NULL;
  2072. status = fnic_priv(sc)->lr_status;
  2073. /*
  2074. * If lun reset not completed, bail out with failed. io_req
  2075. * gets cleaned up during higher levels of EH
  2076. */
  2077. if (status == FCPIO_INVALID_CODE) {
  2078. atomic64_inc(&reset_stats->device_reset_timeouts);
  2079. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  2080. "Device reset timed out\n");
  2081. fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
  2082. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2083. int_to_scsilun(sc->device->lun, &fc_lun);
  2084. /*
  2085. * Issue abort and terminate on device reset request.
2086. * If queuing of the terminate fails, retry it after a delay.
  2087. */
  2088. while (1) {
  2089. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2090. if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
  2091. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2092. break;
  2093. }
  2094. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2095. if (fnic_queue_abort_io_req(fnic,
  2096. mqtag | FNIC_TAG_DEV_RST,
  2097. FCPIO_ITMF_ABT_TASK_TERM,
  2098. fc_lun.scsi_lun, io_req, hwq)) {
  2099. wait_for_completion_timeout(&tm_done,
  2100. msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
  2101. } else {
  2102. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2103. fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
  2104. fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
  2105. io_req->abts_done = &tm_done;
  2106. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2107. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  2108. "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n",
  2109. mqtag, sc);
  2110. break;
  2111. }
  2112. }
  2113. while (1) {
  2114. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2115. if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
  2116. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2117. wait_for_completion_timeout(&tm_done,
  2118. msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
  2119. break;
  2120. } else {
  2121. io_req = fnic_priv(sc)->io_req;
  2122. io_req->abts_done = NULL;
  2123. goto fnic_device_reset_clean;
  2124. }
  2125. }
  2126. } else {
  2127. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2128. }
  2129. /* Completed, but not successful, clean up the io_req, return fail */
  2130. if (status != FCPIO_SUCCESS) {
  2131. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2132. FNIC_SCSI_DBG(KERN_DEBUG,
  2133. fnic->lport->host, fnic->fnic_num,
  2134. "Device reset completed - failed\n");
  2135. io_req = fnic_priv(sc)->io_req;
  2136. goto fnic_device_reset_clean;
  2137. }
  2138. /*
  2139. * Clean up any aborts on this lun that have still not
  2140. * completed. If any of these fail, then LUN reset fails.
  2141. * clean_pending_aborts cleans all cmds on this lun except
  2142. * the lun reset cmd. If all cmds get cleaned, the lun reset
  2143. * succeeds
  2144. */
  2145. if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
  2146. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2147. io_req = fnic_priv(sc)->io_req;
  2148. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  2149. "Device reset failed"
  2150. " since could not abort all IOs\n");
  2151. goto fnic_device_reset_clean;
  2152. }
  2153. /* Clean lun reset command */
  2154. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2155. io_req = fnic_priv(sc)->io_req;
  2156. if (io_req)
  2157. /* Completed, and successful */
  2158. ret = SUCCESS;
  2159. fnic_device_reset_clean:
  2160. if (io_req) {
  2161. fnic_priv(sc)->io_req = NULL;
  2162. io_req->sc = NULL;
  2163. fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL;
  2164. }
  2165. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2166. if (io_req) {
  2167. start_time = io_req->start_time;
  2168. fnic_release_ioreq_buf(fnic, io_req, sc);
  2169. mempool_free(io_req, fnic->io_req_pool);
  2170. }
  2171. fnic_device_reset_end:
  2172. FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
  2173. jiffies_to_msecs(jiffies - start_time),
  2174. 0, ((u64)sc->cmnd[0] << 32 |
  2175. (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
  2176. (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
  2177. fnic_flags_and_state(sc));
  2178. if (new_sc) {
  2179. fnic->sgreset_sc = NULL;
  2180. mutex_unlock(&fnic->sgreset_mutex);
  2181. }
  2182. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  2183. "Returning from device reset %s\n",
  2184. (ret == SUCCESS) ?
  2185. "SUCCESS" : "FAILED");
  2186. if (ret == FAILED)
  2187. atomic64_inc(&reset_stats->device_reset_failures);
  2188. return ret;
  2189. }
  2190. /* Clean up all IOs, clean up libFC local port */
  2191. int fnic_reset(struct Scsi_Host *shost)
  2192. {
  2193. struct fc_lport *lp;
  2194. struct fnic *fnic;
  2195. int ret = 0;
  2196. struct reset_stats *reset_stats;
  2197. lp = shost_priv(shost);
  2198. fnic = lport_priv(lp);
  2199. reset_stats = &fnic->fnic_stats.reset_stats;
  2200. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  2201. "Issuing fnic reset\n");
  2202. atomic64_inc(&reset_stats->fnic_resets);
  2203. /*
  2204. * Reset local port, this will clean up libFC exchanges,
  2205. * reset remote port sessions, and if link is up, begin flogi
  2206. */
  2207. ret = fc_lport_reset(lp);
  2208. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  2209. "Returning from fnic reset with: %s\n",
  2210. (ret == 0) ? "SUCCESS" : "FAILED");
  2211. if (ret == 0)
  2212. atomic64_inc(&reset_stats->fnic_reset_completions);
  2213. else
  2214. atomic64_inc(&reset_stats->fnic_reset_failures);
  2215. return ret;
  2216. }
  2217. /*
  2218. * SCSI Error handling calls driver's eh_host_reset if all prior
  2219. * error handling levels return FAILED. If host reset completes
  2220. * successfully, and if link is up, then Fabric login begins.
  2221. *
2222. * Host reset is the highest level of error recovery. If this fails,
2223. * the host is offlined by SCSI.
  2224. *
  2225. */
  2226. int fnic_host_reset(struct scsi_cmnd *sc)
  2227. {
  2228. int ret;
  2229. unsigned long wait_host_tmo;
  2230. struct Scsi_Host *shost = sc->device->host;
  2231. struct fc_lport *lp = shost_priv(shost);
  2232. struct fnic *fnic = lport_priv(lp);
  2233. unsigned long flags;
  2234. spin_lock_irqsave(&fnic->fnic_lock, flags);
  2235. if (!fnic->internal_reset_inprogress) {
  2236. fnic->internal_reset_inprogress = true;
  2237. } else {
  2238. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2239. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  2240. "host reset in progress skipping another host reset\n");
  2241. return SUCCESS;
  2242. }
  2243. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2244. /*
2245. * If fnic_reset is successful, wait for fabric login to complete.
2246. * scsi-ml tries to send a TUR to every device if host reset is
2247. * successful, so before returning to scsi, the fabric should be up.
  2248. */
  2249. ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
  2250. if (ret == SUCCESS) {
  2251. wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
  2252. ret = FAILED;
  2253. while (time_before(jiffies, wait_host_tmo)) {
  2254. if ((lp->state == LPORT_ST_READY) &&
  2255. (lp->link_up)) {
  2256. ret = SUCCESS;
  2257. break;
  2258. }
  2259. ssleep(1);
  2260. }
  2261. }
  2262. spin_lock_irqsave(&fnic->fnic_lock, flags);
  2263. fnic->internal_reset_inprogress = false;
  2264. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2265. return ret;
  2266. }
  2267. /*
2268. * This function is called from libFC when the host is removed
  2269. */
  2270. void fnic_scsi_abort_io(struct fc_lport *lp)
  2271. {
  2272. int err = 0;
  2273. unsigned long flags;
  2274. enum fnic_state old_state;
  2275. struct fnic *fnic = lport_priv(lp);
  2276. DECLARE_COMPLETION_ONSTACK(remove_wait);
  2277. /* Issue firmware reset for fnic, wait for reset to complete */
  2278. retry_fw_reset:
  2279. spin_lock_irqsave(&fnic->fnic_lock, flags);
  2280. if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
  2281. fnic->link_events) {
  2282. /* fw reset is in progress, poll for its completion */
  2283. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2284. schedule_timeout(msecs_to_jiffies(100));
  2285. goto retry_fw_reset;
  2286. }
  2287. fnic->remove_wait = &remove_wait;
  2288. old_state = fnic->state;
  2289. fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
  2290. fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
  2291. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2292. err = fnic_fw_reset_handler(fnic);
  2293. if (err) {
  2294. spin_lock_irqsave(&fnic->fnic_lock, flags);
  2295. if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
  2296. fnic->state = old_state;
  2297. fnic->remove_wait = NULL;
  2298. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2299. return;
  2300. }
  2301. /* Wait for firmware reset to complete */
  2302. wait_for_completion_timeout(&remove_wait,
  2303. msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
  2304. spin_lock_irqsave(&fnic->fnic_lock, flags);
  2305. fnic->remove_wait = NULL;
  2306. FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
  2307. "fnic_scsi_abort_io %s\n",
  2308. (fnic->state == FNIC_IN_ETH_MODE) ?
  2309. "SUCCESS" : "FAILED");
  2310. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2311. }
  2312. /*
2313. * This function is called from libFC to clean up driver IO state on link down
  2314. */
  2315. void fnic_scsi_cleanup(struct fc_lport *lp)
  2316. {
  2317. unsigned long flags;
  2318. enum fnic_state old_state;
  2319. struct fnic *fnic = lport_priv(lp);
  2320. /* issue fw reset */
  2321. retry_fw_reset:
  2322. spin_lock_irqsave(&fnic->fnic_lock, flags);
  2323. if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
  2324. /* fw reset is in progress, poll for its completion */
  2325. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2326. schedule_timeout(msecs_to_jiffies(100));
  2327. goto retry_fw_reset;
  2328. }
  2329. old_state = fnic->state;
  2330. fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
  2331. fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
  2332. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2333. if (fnic_fw_reset_handler(fnic)) {
  2334. spin_lock_irqsave(&fnic->fnic_lock, flags);
  2335. if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
  2336. fnic->state = old_state;
  2337. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  2338. }
  2339. }
  2340. void fnic_empty_scsi_cleanup(struct fc_lport *lp)
  2341. {
  2342. }
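/*
 * fnic_exch_mgr_reset
 * Reset exchanges for the given sid/did: terminate the driver's IO
 * state for the destination (or all IOs when both sid and did are
 * zero), then call fc_exch_mgr_reset() to reset libFC exchanges.
 */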
  2343. void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
  2344. {
  2345. struct fnic *fnic = lport_priv(lp);
  2346. /* Non-zero sid, nothing to do */
  2347. if (sid)
  2348. goto call_fc_exch_mgr_reset;
  2349. if (did) {
  2350. fnic_rport_exch_reset(fnic, did);
  2351. goto call_fc_exch_mgr_reset;
  2352. }
  2353. /*
  2354. * sid = 0, did = 0
  2355. * link down or device being removed
  2356. */
  2357. if (!fnic->in_remove)
  2358. fnic_scsi_cleanup(lp);
  2359. else
  2360. fnic_scsi_abort_io(lp);
  2361. /* call libFC exch mgr reset to reset its exchanges */
  2362. call_fc_exch_mgr_reset:
  2363. fc_exch_mgr_reset(lp, sid, did);
  2364. }
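/*
 * fnic_abts_pending_iter
 * Per-command iterator used by fnic_is_abts_pending(): sets
 * iter_data->ret to 1 and stops the walk if a matching IO is still in
 * FNIC_IOREQ_ABTS_PENDING state.
 */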
  2365. static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
  2366. {
  2367. struct request *const rq = scsi_cmd_to_rq(sc);
  2368. struct fnic_pending_aborts_iter_data *iter_data = data;
  2369. struct fnic *fnic = iter_data->fnic;
  2370. int cmd_state;
  2371. struct fnic_io_req *io_req;
  2372. unsigned long flags;
  2373. uint16_t hwq = 0;
  2374. int tag;
  2375. tag = blk_mq_unique_tag(rq);
  2376. hwq = blk_mq_unique_tag_to_hwq(tag);
  2377. /*
  2378. * ignore this lun reset cmd or cmds that do not belong to
  2379. * this lun
  2380. */
  2381. if (iter_data->lr_sc && sc == iter_data->lr_sc)
  2382. return true;
  2383. if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
  2384. return true;
  2385. spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
  2386. io_req = fnic_priv(sc)->io_req;
  2387. if (!io_req) {
  2388. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2389. return true;
  2390. }
  2391. /*
  2392. * Found IO that is still pending with firmware and
  2393. * belongs to the LUN that we are resetting
  2394. */
  2395. FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
  2396. "hwq: %d tag: 0x%x Found IO in state: %s on lun\n",
  2397. hwq, tag,
  2398. fnic_ioreq_state_to_str(fnic_priv(sc)->state));
  2399. cmd_state = fnic_priv(sc)->state;
  2400. spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
  2401. if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
  2402. iter_data->ret = 1;
  2403. return iter_data->ret ? false : true;
  2404. }
  2405. /*
2406. * fnic_is_abts_pending() is a helper function that walks the tag map
2407. * to check whether any IOs are pending. If there is one, it returns
2408. * 1 (true), otherwise 0 (false).
2409. * If @lr_sc is non-NULL, it checks IOs specific to that particular LUN;
2410. * otherwise, it checks all IOs.
  2411. */
  2412. int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
  2413. {
  2414. struct fnic_pending_aborts_iter_data iter_data = {
  2415. .fnic = fnic,
  2416. .lun_dev = NULL,
  2417. .ret = 0,
  2418. };
  2419. if (lr_sc) {
  2420. iter_data.lun_dev = lr_sc->device;
  2421. iter_data.lr_sc = lr_sc;
  2422. }
  2423. /* walk again to check, if IOs are still pending in fw */
  2424. scsi_host_busy_iter(fnic->lport->host,
  2425. fnic_abts_pending_iter, &iter_data);
  2426. return iter_data.ret;
  2427. }