lpfc_nvmet.c

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
  5. * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
  6. * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  10. * *
  11. * This program is free software; you can redistribute it and/or *
  12. * modify it under the terms of version 2 of the GNU General *
  13. * Public License as published by the Free Software Foundation. *
  14. * This program is distributed in the hope that it will be useful. *
  15. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  16. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  17. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  18. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  20. * more details, a copy of which can be found in the file COPYING *
  21. * included with this package. *
  22. ********************************************************************/
  23. #include <linux/pci.h>
  24. #include <linux/slab.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/delay.h>
  27. #include <asm/unaligned.h>
  28. #include <linux/crc-t10dif.h>
  29. #include <net/checksum.h>
  30. #include <scsi/scsi.h>
  31. #include <scsi/scsi_device.h>
  32. #include <scsi/scsi_eh.h>
  33. #include <scsi/scsi_host.h>
  34. #include <scsi/scsi_tcq.h>
  35. #include <scsi/scsi_transport_fc.h>
  36. #include <scsi/fc/fc_fs.h>
  37. #include <linux/nvme.h>
  38. #include <linux/nvme-fc-driver.h>
  39. #include <linux/nvme-fc.h>
  40. #include "lpfc_version.h"
  41. #include "lpfc_hw4.h"
  42. #include "lpfc_hw.h"
  43. #include "lpfc_sli.h"
  44. #include "lpfc_sli4.h"
  45. #include "lpfc_nl.h"
  46. #include "lpfc_disc.h"
  47. #include "lpfc.h"
  48. #include "lpfc_scsi.h"
  49. #include "lpfc_nvme.h"
  50. #include "lpfc_nvmet.h"
  51. #include "lpfc_logmsg.h"
  52. #include "lpfc_crtn.h"
  53. #include "lpfc_vport.h"
  54. #include "lpfc_debugfs.h"
  55. static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
  56. struct lpfc_nvmet_rcv_ctx *,
  57. dma_addr_t rspbuf,
  58. uint16_t rspsize);
  59. static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
  60. struct lpfc_nvmet_rcv_ctx *);
  61. static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
  62. struct lpfc_nvmet_rcv_ctx *,
  63. uint32_t, uint16_t);
  64. static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
  65. struct lpfc_nvmet_rcv_ctx *,
  66. uint32_t, uint16_t);
  67. static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
  68. struct lpfc_nvmet_rcv_ctx *,
  69. uint32_t, uint16_t);
  70. static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
  71. struct lpfc_nvmet_rcv_ctx *);
  72. static union lpfc_wqe128 lpfc_tsend_cmd_template;
  73. static union lpfc_wqe128 lpfc_treceive_cmd_template;
  74. static union lpfc_wqe128 lpfc_trsp_cmd_template;
  75. /* Setup WQE templates for NVME IOs */
  76. void
  77. lpfc_nvmet_cmd_template(void)
  78. {
  79. union lpfc_wqe128 *wqe;
  80. /* TSEND template */
  81. wqe = &lpfc_tsend_cmd_template;
  82. memset(wqe, 0, sizeof(union lpfc_wqe128));
  83. /* Word 0, 1, 2 - BDE is variable */
  84. /* Word 3 - payload_offset_len is zero */
  85. /* Word 4 - relative_offset is variable */
  86. /* Word 5 - is zero */
  87. /* Word 6 - ctxt_tag, xri_tag is variable */
  88. /* Word 7 - wqe_ar is variable */
  89. bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
  90. bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
  91. bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
  92. bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
  93. bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
  94. /* Word 8 - abort_tag is variable */
  95. /* Word 9 - reqtag, rcvoxid is variable */
  96. /* Word 10 - wqes, xc is variable */
  97. bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
  98. bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
  99. bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
  100. bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
  101. bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
  102. bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
  103. /* Word 11 - sup, irsp, irsplen is variable */
  104. bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
  105. bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
  106. bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
  107. bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
  108. bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
  109. bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
  110. /* Word 12 - fcp_data_len is variable */
  111. /* Word 13, 14, 15 - PBDE is zero */
  112. /* TRECEIVE template */
  113. wqe = &lpfc_treceive_cmd_template;
  114. memset(wqe, 0, sizeof(union lpfc_wqe128));
  115. /* Word 0, 1, 2 - BDE is variable */
  116. /* Word 3 */
  117. wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
  118. /* Word 4 - relative_offset is variable */
  119. /* Word 5 - is zero */
  120. /* Word 6 - ctxt_tag, xri_tag is variable */
  121. /* Word 7 */
  122. bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
  123. bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
  124. bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
  125. bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
  126. bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
  127. /* Word 8 - abort_tag is variable */
  128. /* Word 9 - reqtag, rcvoxid is variable */
  129. /* Word 10 - xc is variable */
  130. bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
  131. bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
  132. bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
  133. bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
  134. bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
  135. bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
  136. /* Word 11 - pbde is variable */
  137. bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
  138. bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
  139. bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
  140. bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
  141. bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
  142. bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
  143. /* Word 12 - fcp_data_len is variable */
  144. /* Word 13, 14, 15 - PBDE is variable */
  145. /* TRSP template */
  146. wqe = &lpfc_trsp_cmd_template;
  147. memset(wqe, 0, sizeof(union lpfc_wqe128));
  148. /* Word 0, 1, 2 - BDE is variable */
  149. /* Word 3 - response_len is variable */
  150. /* Word 4, 5 - is zero */
  151. /* Word 6 - ctxt_tag, xri_tag is variable */
  152. /* Word 7 */
  153. bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
  154. bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
  155. bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
  156. bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
  157. bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
  158. /* Word 8 - abort_tag is variable */
  159. /* Word 9 - reqtag is variable */
  160. /* Word 10 wqes, xc is variable */
  161. bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
  162. bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
  163. bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
  164. bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
  165. bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
  166. bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
  167. /* Word 11 irsp, irsplen is variable */
  168. bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
  169. bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
  170. bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
  171. bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
  172. bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
  173. bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
  174. /* Word 12, 13, 14, 15 - is zero */
  175. }
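/*
 * The templates above pre-set only the fixed WQE fields. The words noted as
 * "variable" in the comments (BDE words 0-2, relative offset, ctxt/xri tags,
 * reqtag/rcvoxid, data length, etc.) are filled in per IO after a template is
 * copied into the request's WQE; a minimal sketch of that pattern, with the
 * per-IO values elided:
 *
 *	memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(union lpfc_wqe128));
 *	// then set the BDE, word 4 relative_offset, word 6 ctxt/xri tags,
 *	// word 9 reqtag/rcvoxid and word 12 fcp_data_len for this request
 *
 * The actual per-IO setup is expected to happen in lpfc_nvmet_prep_fcp_wqe(),
 * declared above.
 */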
  176. void
  177. lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
  178. {
  179. unsigned long iflag;
  180. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  181. "6313 NVMET Defer ctx release xri x%x flg x%x\n",
  182. ctxp->oxid, ctxp->flag);
  183. spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
  184. if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
  185. spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
  186. iflag);
  187. return;
  188. }
  189. ctxp->flag |= LPFC_NVMET_CTX_RLS;
  190. list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
  191. spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
  192. }
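/*
 * Contexts flagged LPFC_NVMET_CTX_RLS above are parked on
 * lpfc_abts_nvmet_ctx_list instead of being reposted right away; they are
 * presumably recycled later by the ABTS / XRI-aborted completion handling
 * once the exchange is truly finished.
 */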
  193. /**
  194. * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
  195. * @phba: Pointer to HBA context object.
  196. * @cmdwqe: Pointer to driver command WQE object.
  197. * @wcqe: Pointer to driver response CQE object.
  198. *
  199. * The function is called from SLI ring event handler with no
  200. * lock held. This function is the completion handler for NVME LS commands.
  201. * The function frees memory resources used for the NVME commands.
  202. **/
  203. static void
  204. lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
  205. struct lpfc_wcqe_complete *wcqe)
  206. {
  207. struct lpfc_nvmet_tgtport *tgtp;
  208. struct nvmefc_tgt_ls_req *rsp;
  209. struct lpfc_nvmet_rcv_ctx *ctxp;
  210. uint32_t status, result;
  211. status = bf_get(lpfc_wcqe_c_status, wcqe);
  212. result = wcqe->parameter;
  213. ctxp = cmdwqe->context2;
  214. if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
  215. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  216. "6410 NVMET LS cmpl state mismatch IO x%x: "
  217. "%d %d\n",
  218. ctxp->oxid, ctxp->state, ctxp->entry_cnt);
  219. }
  220. if (!phba->targetport)
  221. goto out;
  222. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  223. if (tgtp) {
  224. if (status) {
  225. atomic_inc(&tgtp->xmt_ls_rsp_error);
  226. if (result == IOERR_ABORT_REQUESTED)
  227. atomic_inc(&tgtp->xmt_ls_rsp_aborted);
  228. if (bf_get(lpfc_wcqe_c_xb, wcqe))
  229. atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
  230. } else {
  231. atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
  232. }
  233. }
  234. out:
  235. rsp = &ctxp->ctx.ls_req;
  236. lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
  237. ctxp->oxid, status, result);
  238. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
  239. "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
  240. status, result, ctxp->oxid);
  241. lpfc_nlp_put(cmdwqe->context1);
  242. cmdwqe->context2 = NULL;
  243. cmdwqe->context3 = NULL;
  244. lpfc_sli_release_iocbq(phba, cmdwqe);
  245. rsp->done(rsp);
  246. kfree(ctxp);
  247. }
  248. /**
  249. * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
  250. * @phba: HBA buffer is associated with
  251. * @ctx_buf: NVMET receive context buffer to repost or reuse.
  252. *
  253. * Description: Reposts the given NVMET RQ DMA buffer and receive context so
  254. * they can be reused, either immediately for a command parked on the IO wait
  255. * list or by returning the context to its per-CPU/MRQ free list.
  256. *
  257. * Notes: Takes phba->hbalock. Can be called with or without other locks held.
  258. *
  259. * Returns: None
  260. **/
  261. void
  262. lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
  263. {
  264. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  265. struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
  266. struct lpfc_nvmet_tgtport *tgtp;
  267. struct fc_frame_header *fc_hdr;
  268. struct rqb_dmabuf *nvmebuf;
  269. struct lpfc_nvmet_ctx_info *infop;
  270. uint32_t *payload;
  271. uint32_t size, oxid, sid, rc;
  272. int cpu;
  273. unsigned long iflag;
  274. if (ctxp->txrdy) {
  275. dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
  276. ctxp->txrdy_phys);
  277. ctxp->txrdy = NULL;
  278. ctxp->txrdy_phys = 0;
  279. }
  280. if (ctxp->state == LPFC_NVMET_STE_FREE) {
  281. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  282. "6411 NVMET free, already free IO x%x: %d %d\n",
  283. ctxp->oxid, ctxp->state, ctxp->entry_cnt);
  284. }
  285. ctxp->state = LPFC_NVMET_STE_FREE;
  286. spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
  287. if (phba->sli4_hba.nvmet_io_wait_cnt) {
  288. list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
  289. nvmebuf, struct rqb_dmabuf,
  290. hbuf.list);
  291. phba->sli4_hba.nvmet_io_wait_cnt--;
  292. spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
  293. iflag);
  294. fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
  295. oxid = be16_to_cpu(fc_hdr->fh_ox_id);
  296. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  297. payload = (uint32_t *)(nvmebuf->dbuf.virt);
  298. size = nvmebuf->bytes_recv;
  299. sid = sli4_sid_from_fc_hdr(fc_hdr);
  300. ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
  301. ctxp->wqeq = NULL;
  302. ctxp->txrdy = NULL;
  303. ctxp->offset = 0;
  304. ctxp->phba = phba;
  305. ctxp->size = size;
  306. ctxp->oxid = oxid;
  307. ctxp->sid = sid;
  308. ctxp->state = LPFC_NVMET_STE_RCV;
  309. ctxp->entry_cnt = 1;
  310. ctxp->flag = 0;
  311. ctxp->ctxbuf = ctx_buf;
  312. ctxp->rqb_buffer = (void *)nvmebuf;
  313. spin_lock_init(&ctxp->ctxlock);
  314. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  315. if (ctxp->ts_cmd_nvme) {
  316. ctxp->ts_cmd_nvme = ktime_get_ns();
  317. ctxp->ts_nvme_data = 0;
  318. ctxp->ts_data_wqput = 0;
  319. ctxp->ts_isr_data = 0;
  320. ctxp->ts_data_nvme = 0;
  321. ctxp->ts_nvme_status = 0;
  322. ctxp->ts_status_wqput = 0;
  323. ctxp->ts_isr_status = 0;
  324. ctxp->ts_status_nvme = 0;
  325. }
  326. #endif
  327. atomic_inc(&tgtp->rcv_fcp_cmd_in);
  328. /*
  329. * The calling sequence should be:
  330. * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
  331. * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
  332. * When we return from nvmet_fc_rcv_fcp_req, all relevant info from
  333. * the NVME command / FC header has already been stored.
  334. * A buffer has already been reposted for this IO, so just free
  335. * the nvmebuf.
  336. */
  337. rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
  338. payload, size);
  339. /* Process FCP command */
  340. if (rc == 0) {
  341. ctxp->rqb_buffer = NULL;
  342. atomic_inc(&tgtp->rcv_fcp_cmd_out);
  343. nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
  344. return;
  345. }
  346. /* Processing of FCP command is deferred */
  347. if (rc == -EOVERFLOW) {
  348. lpfc_nvmeio_data(phba,
  349. "NVMET RCV BUSY: xri x%x sz %d "
  350. "from %06x\n",
  351. oxid, size, sid);
  352. atomic_inc(&tgtp->rcv_fcp_cmd_out);
  353. return;
  354. }
  355. atomic_inc(&tgtp->rcv_fcp_cmd_drop);
  356. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  357. "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
  358. ctxp->oxid, rc,
  359. atomic_read(&tgtp->rcv_fcp_cmd_in),
  360. atomic_read(&tgtp->rcv_fcp_cmd_out),
  361. atomic_read(&tgtp->xmt_fcp_release));
  362. lpfc_nvmet_defer_release(phba, ctxp);
  363. lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
  364. nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
  365. return;
  366. }
  367. spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
  368. /*
  369. * Use the CPU context list, from the MRQ the IO was received on
  370. * (ctxp->idx), to save context structure.
  371. */
  372. cpu = smp_processor_id();
  373. infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
  374. spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
  375. list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
  376. infop->nvmet_ctx_list_cnt++;
  377. spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
  378. #endif
  379. }
  380. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  381. static void
  382. lpfc_nvmet_ktime(struct lpfc_hba *phba,
  383. struct lpfc_nvmet_rcv_ctx *ctxp)
  384. {
  385. uint64_t seg1, seg2, seg3, seg4, seg5;
  386. uint64_t seg6, seg7, seg8, seg9, seg10;
  387. uint64_t segsum;
  388. if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
  389. !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
  390. !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
  391. !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
  392. !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
  393. return;
  394. if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
  395. return;
  396. if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
  397. return;
  398. if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
  399. return;
  400. if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
  401. return;
  402. if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
  403. return;
  404. if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
  405. return;
  406. if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
  407. return;
  408. if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
  409. return;
  410. if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
  411. return;
  412. if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
  413. return;
  414. /*
  415. * Segment 1 - Time from FCP command received by MSI-X ISR
  416. * to FCP command is passed to NVME Layer.
  417. * Segment 2 - Time from FCP command payload handed
  418. * off to NVME Layer to Driver receives a Command op
  419. * from NVME Layer.
  420. * Segment 3 - Time from Driver receives a Command op
  421. * from NVME Layer to Command is put on WQ.
  422. * Segment 4 - Time from Driver WQ put is done
  423. * to MSI-X ISR for Command cmpl.
  424. * Segment 5 - Time from MSI-X ISR for Command cmpl to
  425. * Command cmpl is passed to NVME Layer.
  426. * Segment 6 - Time from Command cmpl is passed to NVME
  427. * Layer to Driver receives a RSP op from NVME Layer.
  428. * Segment 7 - Time from Driver receives a RSP op from
  429. * NVME Layer to WQ put is done on TRSP FCP Status.
  430. * Segment 8 - Time from Driver WQ put is done on TRSP
  431. * FCP Status to MSI-X ISR for TRSP cmpl.
  432. * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
  433. * TRSP cmpl is passed to NVME Layer.
  434. * Segment 10 - Time from FCP command received by
  435. * MSI-X ISR to command is completed on wire.
  436. * (Segments 1 thru 8) for READDATA / WRITEDATA
  437. * (Segments 1 thru 4) for READDATA_RSP
  438. */
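/*
 * Each segN below is first taken as a delta from ts_isr_cmd and then has
 * the running total of the earlier segments (segsum) subtracted, so the
 * segments partition the overall latency; if the running total ever exceeds
 * the next delta, the sample is discarded.
 */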
  439. seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
  440. segsum = seg1;
  441. seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
  442. if (segsum > seg2)
  443. return;
  444. seg2 -= segsum;
  445. segsum += seg2;
  446. seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
  447. if (segsum > seg3)
  448. return;
  449. seg3 -= segsum;
  450. segsum += seg3;
  451. seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
  452. if (segsum > seg4)
  453. return;
  454. seg4 -= segsum;
  455. segsum += seg4;
  456. seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
  457. if (segsum > seg5)
  458. return;
  459. seg5 -= segsum;
  460. segsum += seg5;
  461. /* For auto rsp commands seg6 thru seg10 will be 0 */
  462. if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
  463. seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
  464. if (segsum > seg6)
  465. return;
  466. seg6 -= segsum;
  467. segsum += seg6;
  468. seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
  469. if (segsum > seg7)
  470. return;
  471. seg7 -= segsum;
  472. segsum += seg7;
  473. seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
  474. if (segsum > seg8)
  475. return;
  476. seg8 -= segsum;
  477. segsum += seg8;
  478. seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
  479. if (segsum > seg9)
  480. return;
  481. seg9 -= segsum;
  482. segsum += seg9;
  483. if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
  484. return;
  485. seg10 = (ctxp->ts_isr_status -
  486. ctxp->ts_isr_cmd);
  487. } else {
  488. if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
  489. return;
  490. seg6 = 0;
  491. seg7 = 0;
  492. seg8 = 0;
  493. seg9 = 0;
  494. seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
  495. }
  496. phba->ktime_seg1_total += seg1;
  497. if (seg1 < phba->ktime_seg1_min)
  498. phba->ktime_seg1_min = seg1;
  499. else if (seg1 > phba->ktime_seg1_max)
  500. phba->ktime_seg1_max = seg1;
  501. phba->ktime_seg2_total += seg2;
  502. if (seg2 < phba->ktime_seg2_min)
  503. phba->ktime_seg2_min = seg2;
  504. else if (seg2 > phba->ktime_seg2_max)
  505. phba->ktime_seg2_max = seg2;
  506. phba->ktime_seg3_total += seg3;
  507. if (seg3 < phba->ktime_seg3_min)
  508. phba->ktime_seg3_min = seg3;
  509. else if (seg3 > phba->ktime_seg3_max)
  510. phba->ktime_seg3_max = seg3;
  511. phba->ktime_seg4_total += seg4;
  512. if (seg4 < phba->ktime_seg4_min)
  513. phba->ktime_seg4_min = seg4;
  514. else if (seg4 > phba->ktime_seg4_max)
  515. phba->ktime_seg4_max = seg4;
  516. phba->ktime_seg5_total += seg5;
  517. if (seg5 < phba->ktime_seg5_min)
  518. phba->ktime_seg5_min = seg5;
  519. else if (seg5 > phba->ktime_seg5_max)
  520. phba->ktime_seg5_max = seg5;
  521. phba->ktime_data_samples++;
  522. if (!seg6)
  523. goto out;
  524. phba->ktime_seg6_total += seg6;
  525. if (seg6 < phba->ktime_seg6_min)
  526. phba->ktime_seg6_min = seg6;
  527. else if (seg6 > phba->ktime_seg6_max)
  528. phba->ktime_seg6_max = seg6;
  529. phba->ktime_seg7_total += seg7;
  530. if (seg7 < phba->ktime_seg7_min)
  531. phba->ktime_seg7_min = seg7;
  532. else if (seg7 > phba->ktime_seg7_max)
  533. phba->ktime_seg7_max = seg7;
  534. phba->ktime_seg8_total += seg8;
  535. if (seg8 < phba->ktime_seg8_min)
  536. phba->ktime_seg8_min = seg8;
  537. else if (seg8 > phba->ktime_seg8_max)
  538. phba->ktime_seg8_max = seg8;
  539. phba->ktime_seg9_total += seg9;
  540. if (seg9 < phba->ktime_seg9_min)
  541. phba->ktime_seg9_min = seg9;
  542. else if (seg9 > phba->ktime_seg9_max)
  543. phba->ktime_seg9_max = seg9;
  544. out:
  545. phba->ktime_seg10_total += seg10;
  546. if (seg10 < phba->ktime_seg10_min)
  547. phba->ktime_seg10_min = seg10;
  548. else if (seg10 > phba->ktime_seg10_max)
  549. phba->ktime_seg10_max = seg10;
  550. phba->ktime_status_samples++;
  551. }
  552. #endif
  553. /**
  554. * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
  555. * @phba: Pointer to HBA context object.
  556. * @cmdwqe: Pointer to driver command WQE object.
  557. * @wcqe: Pointer to driver response CQE object.
  558. *
  559. * The function is called from SLI ring event handler with no
  560. * lock held. This function is the completion handler for NVME FCP commands.
  561. * The function frees memory resources used for the NVME commands.
  562. **/
  563. static void
  564. lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
  565. struct lpfc_wcqe_complete *wcqe)
  566. {
  567. struct lpfc_nvmet_tgtport *tgtp;
  568. struct nvmefc_tgt_fcp_req *rsp;
  569. struct lpfc_nvmet_rcv_ctx *ctxp;
  570. uint32_t status, result, op, start_clean, logerr;
  571. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  572. uint32_t id;
  573. #endif
  574. ctxp = cmdwqe->context2;
  575. ctxp->flag &= ~LPFC_NVMET_IO_INP;
  576. rsp = &ctxp->ctx.fcp_req;
  577. op = rsp->op;
  578. status = bf_get(lpfc_wcqe_c_status, wcqe);
  579. result = wcqe->parameter;
  580. if (phba->targetport)
  581. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  582. else
  583. tgtp = NULL;
  584. lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
  585. ctxp->oxid, op, status);
  586. if (status) {
  587. rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
  588. rsp->transferred_length = 0;
  589. if (tgtp) {
  590. atomic_inc(&tgtp->xmt_fcp_rsp_error);
  591. if (result == IOERR_ABORT_REQUESTED)
  592. atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
  593. }
  594. logerr = LOG_NVME_IOERR;
  595. /* pick up SLI4 exchange busy condition */
  596. if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
  597. ctxp->flag |= LPFC_NVMET_XBUSY;
  598. logerr |= LOG_NVME_ABTS;
  599. if (tgtp)
  600. atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
  601. } else {
  602. ctxp->flag &= ~LPFC_NVMET_XBUSY;
  603. }
  604. lpfc_printf_log(phba, KERN_INFO, logerr,
  605. "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
  606. ctxp->oxid, status, result, ctxp->flag);
  607. } else {
  608. rsp->fcp_error = NVME_SC_SUCCESS;
  609. if (op == NVMET_FCOP_RSP)
  610. rsp->transferred_length = rsp->rsplen;
  611. else
  612. rsp->transferred_length = rsp->transfer_length;
  613. if (tgtp)
  614. atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
  615. }
  616. if ((op == NVMET_FCOP_READDATA_RSP) ||
  617. (op == NVMET_FCOP_RSP)) {
  618. /* Sanity check */
  619. ctxp->state = LPFC_NVMET_STE_DONE;
  620. ctxp->entry_cnt++;
  621. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  622. if (ctxp->ts_cmd_nvme) {
  623. if (rsp->op == NVMET_FCOP_READDATA_RSP) {
  624. ctxp->ts_isr_data =
  625. cmdwqe->isr_timestamp;
  626. ctxp->ts_data_nvme =
  627. ktime_get_ns();
  628. ctxp->ts_nvme_status =
  629. ctxp->ts_data_nvme;
  630. ctxp->ts_status_wqput =
  631. ctxp->ts_data_nvme;
  632. ctxp->ts_isr_status =
  633. ctxp->ts_data_nvme;
  634. ctxp->ts_status_nvme =
  635. ctxp->ts_data_nvme;
  636. } else {
  637. ctxp->ts_isr_status =
  638. cmdwqe->isr_timestamp;
  639. ctxp->ts_status_nvme =
  640. ktime_get_ns();
  641. }
  642. }
  643. if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
  644. id = smp_processor_id();
  645. if (ctxp->cpu != id)
  646. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  647. "6703 CPU Check cmpl: "
  648. "cpu %d expect %d\n",
  649. id, ctxp->cpu);
  650. if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
  651. phba->cpucheck_cmpl_io[id]++;
  652. }
  653. #endif
  654. rsp->done(rsp);
  655. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  656. if (ctxp->ts_cmd_nvme)
  657. lpfc_nvmet_ktime(phba, ctxp);
  658. #endif
  659. /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
  660. } else {
  661. ctxp->entry_cnt++;
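/*
 * A data op (TSEND/TRECEIVE) completed but the exchange is not finished:
 * clear the iocbq from iocb_flag onward (flags, contexts, completion
 * routine) so the same iocbq/WQE resource can be reused for the next
 * operation on this exchange.
 */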
  662. start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
  663. memset(((char *)cmdwqe) + start_clean, 0,
  664. (sizeof(struct lpfc_iocbq) - start_clean));
  665. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  666. if (ctxp->ts_cmd_nvme) {
  667. ctxp->ts_isr_data = cmdwqe->isr_timestamp;
  668. ctxp->ts_data_nvme = ktime_get_ns();
  669. }
  670. if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
  671. id = smp_processor_id();
  672. if (ctxp->cpu != id)
  673. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  674. "6704 CPU Check cmdcmpl: "
  675. "cpu %d expect %d\n",
  676. id, ctxp->cpu);
  677. if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
  678. phba->cpucheck_ccmpl_io[id]++;
  679. }
  680. #endif
  681. rsp->done(rsp);
  682. }
  683. }
  684. static int
  685. lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
  686. struct nvmefc_tgt_ls_req *rsp)
  687. {
  688. struct lpfc_nvmet_rcv_ctx *ctxp =
  689. container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
  690. struct lpfc_hba *phba = ctxp->phba;
  691. struct hbq_dmabuf *nvmebuf =
  692. (struct hbq_dmabuf *)ctxp->rqb_buffer;
  693. struct lpfc_iocbq *nvmewqeq;
  694. struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
  695. struct lpfc_dmabuf dmabuf;
  696. struct ulp_bde64 bpl;
  697. int rc;
  698. if (phba->pport->load_flag & FC_UNLOADING)
  699. return -ENODEV;
  702. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
  703. "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
  704. if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
  705. (ctxp->entry_cnt != 1)) {
  706. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  707. "6412 NVMET LS rsp state mismatch "
  708. "oxid x%x: %d %d\n",
  709. ctxp->oxid, ctxp->state, ctxp->entry_cnt);
  710. }
  711. ctxp->state = LPFC_NVMET_STE_LS_RSP;
  712. ctxp->entry_cnt++;
  713. nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
  714. rsp->rsplen);
  715. if (nvmewqeq == NULL) {
  716. atomic_inc(&nvmep->xmt_ls_drop);
  717. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  718. "6150 LS Drop IO x%x: Prep\n",
  719. ctxp->oxid);
  720. lpfc_in_buf_free(phba, &nvmebuf->dbuf);
  721. atomic_inc(&nvmep->xmt_ls_abort);
  722. lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
  723. ctxp->sid, ctxp->oxid);
  724. return -ENOMEM;
  725. }
  726. /* Save numBdes for bpl2sgl */
  727. nvmewqeq->rsvd2 = 1;
  728. nvmewqeq->hba_wqidx = 0;
  729. nvmewqeq->context3 = &dmabuf;
  730. dmabuf.virt = &bpl;
  731. bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
  732. bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
  733. bpl.tus.f.bdeSize = rsp->rsplen;
  734. bpl.tus.f.bdeFlags = 0;
  735. bpl.tus.w = le32_to_cpu(bpl.tus.w);
  736. nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
  737. nvmewqeq->iocb_cmpl = NULL;
  738. nvmewqeq->context2 = ctxp;
  739. lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
  740. ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
  741. rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
  742. if (rc == WQE_SUCCESS) {
  743. /*
  744. * Okay to repost buffer here, but wait till cmpl
  745. * before freeing ctxp and iocbq.
  746. */
  747. lpfc_in_buf_free(phba, &nvmebuf->dbuf);
  748. ctxp->rqb_buffer = NULL;
  749. atomic_inc(&nvmep->xmt_ls_rsp);
  750. return 0;
  751. }
  752. /* Give back resources */
  753. atomic_inc(&nvmep->xmt_ls_drop);
  754. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  755. "6151 LS Drop IO x%x: Issue %d\n",
  756. ctxp->oxid, rc);
  757. lpfc_nlp_put(nvmewqeq->context1);
  758. lpfc_in_buf_free(phba, &nvmebuf->dbuf);
  759. atomic_inc(&nvmep->xmt_ls_abort);
  760. lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
  761. return -ENXIO;
  762. }
  763. static int
  764. lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
  765. struct nvmefc_tgt_fcp_req *rsp)
  766. {
  767. struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
  768. struct lpfc_nvmet_rcv_ctx *ctxp =
  769. container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
  770. struct lpfc_hba *phba = ctxp->phba;
  771. struct lpfc_queue *wq;
  772. struct lpfc_iocbq *nvmewqeq;
  773. struct lpfc_sli_ring *pring;
  774. unsigned long iflags;
  775. int rc;
  776. if (phba->pport->load_flag & FC_UNLOADING) {
  777. rc = -ENODEV;
  778. goto aerr;
  779. }
  784. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  785. if (ctxp->ts_cmd_nvme) {
  786. if (rsp->op == NVMET_FCOP_RSP)
  787. ctxp->ts_nvme_status = ktime_get_ns();
  788. else
  789. ctxp->ts_nvme_data = ktime_get_ns();
  790. }
  791. if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
  792. int id = smp_processor_id();
  793. ctxp->cpu = id;
  794. if (id < LPFC_CHECK_CPU_CNT)
  795. phba->cpucheck_xmt_io[id]++;
  796. if (rsp->hwqid != id) {
  797. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  798. "6705 CPU Check OP: "
  799. "cpu %d expect %d\n",
  800. id, rsp->hwqid);
  801. ctxp->cpu = rsp->hwqid;
  802. }
  803. }
  804. #endif
  805. /* Sanity check */
  806. if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
  807. (ctxp->state == LPFC_NVMET_STE_ABORT)) {
  808. atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
  809. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  810. "6102 IO xri x%x aborted\n",
  811. ctxp->oxid);
  812. rc = -ENXIO;
  813. goto aerr;
  814. }
  815. nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
  816. if (nvmewqeq == NULL) {
  817. atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
  818. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  819. "6152 FCP Drop IO x%x: Prep\n",
  820. ctxp->oxid);
  821. rc = -ENXIO;
  822. goto aerr;
  823. }
  824. nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
  825. nvmewqeq->iocb_cmpl = NULL;
  826. nvmewqeq->context2 = ctxp;
  827. nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
  828. ctxp->wqeq->hba_wqidx = rsp->hwqid;
  829. lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
  830. ctxp->oxid, rsp->op, rsp->rsplen);
  831. ctxp->flag |= LPFC_NVMET_IO_INP;
  832. rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
  833. if (rc == WQE_SUCCESS) {
  834. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  835. if (!ctxp->ts_cmd_nvme)
  836. return 0;
  837. if (rsp->op == NVMET_FCOP_RSP)
  838. ctxp->ts_status_wqput = ktime_get_ns();
  839. else
  840. ctxp->ts_data_wqput = ktime_get_ns();
  841. #endif
  842. return 0;
  843. }
  844. if (rc == -EBUSY) {
  845. /*
  846. * WQ was full, so queue nvmewqeq to be sent after
  847. * WQE release CQE
  848. */
  849. ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
  850. wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
  851. pring = wq->pring;
  852. spin_lock_irqsave(&pring->ring_lock, iflags);
  853. list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
  854. wq->q_flag |= HBA_NVMET_WQFULL;
  855. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  856. atomic_inc(&lpfc_nvmep->defer_wqfull);
  857. return 0;
  858. }
  859. /* Give back resources */
  860. atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
  861. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  862. "6153 FCP Drop IO x%x: Issue: %d\n",
  863. ctxp->oxid, rc);
  864. ctxp->wqeq->hba_wqidx = 0;
  865. nvmewqeq->context2 = NULL;
  866. nvmewqeq->context3 = NULL;
  867. rc = -EBUSY;
  868. aerr:
  869. return rc;
  870. }
  871. static void
  872. lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
  873. {
  874. struct lpfc_nvmet_tgtport *tport = targetport->private;
  875. /* release any threads waiting for the unreg to complete */
  876. if (tport->phba->targetport)
  877. complete(tport->tport_unreg_cmp);
  878. }
  879. static void
  880. lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
  881. struct nvmefc_tgt_fcp_req *req)
  882. {
  883. struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
  884. struct lpfc_nvmet_rcv_ctx *ctxp =
  885. container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
  886. struct lpfc_hba *phba = ctxp->phba;
  887. struct lpfc_queue *wq;
  888. unsigned long flags;
  889. if (phba->pport->load_flag & FC_UNLOADING)
  890. return;
  893. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  894. "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
  895. ctxp->oxid, ctxp->flag, ctxp->state);
  896. lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
  897. ctxp->oxid, ctxp->flag, ctxp->state);
  898. atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
  899. spin_lock_irqsave(&ctxp->ctxlock, flags);
  900. ctxp->state = LPFC_NVMET_STE_ABORT;
  901. /* Since iaab/iaar are NOT set, we need to check
  902. * if the firmware is in the process of aborting this IO
  903. */
  904. if (ctxp->flag & LPFC_NVMET_XBUSY) {
  905. spin_unlock_irqrestore(&ctxp->ctxlock, flags);
  906. return;
  907. }
  908. ctxp->flag |= LPFC_NVMET_ABORT_OP;
  909. if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
  910. lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
  911. ctxp->oxid);
  912. wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
  913. spin_unlock_irqrestore(&ctxp->ctxlock, flags);
  914. lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
  915. return;
  916. }
  917. /* A state of LPFC_NVMET_STE_RCV means we have just received
  918. * the NVME command and have not started processing it.
  919. * (by issuing any IO WQEs on this exchange yet)
  920. */
  921. if (ctxp->state == LPFC_NVMET_STE_RCV)
  922. lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
  923. ctxp->oxid);
  924. else
  925. lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
  926. ctxp->oxid);
  927. spin_unlock_irqrestore(&ctxp->ctxlock, flags);
  928. }
  929. static void
  930. lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
  931. struct nvmefc_tgt_fcp_req *rsp)
  932. {
  933. struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
  934. struct lpfc_nvmet_rcv_ctx *ctxp =
  935. container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
  936. struct lpfc_hba *phba = ctxp->phba;
  937. unsigned long flags;
  938. bool aborting = false;
  939. if (ctxp->state != LPFC_NVMET_STE_DONE &&
  940. ctxp->state != LPFC_NVMET_STE_ABORT) {
  941. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  942. "6413 NVMET release bad state %d %d oxid x%x\n",
  943. ctxp->state, ctxp->entry_cnt, ctxp->oxid);
  944. }
  945. spin_lock_irqsave(&ctxp->ctxlock, flags);
  946. if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
  947. (ctxp->flag & LPFC_NVMET_XBUSY)) {
  948. aborting = true;
  949. /* let the abort path do the real release */
  950. lpfc_nvmet_defer_release(phba, ctxp);
  951. }
  952. spin_unlock_irqrestore(&ctxp->ctxlock, flags);
  953. lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
  954. ctxp->state, aborting);
  955. atomic_inc(&lpfc_nvmep->xmt_fcp_release);
  956. if (aborting)
  957. return;
  958. lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
  959. }
  960. static void
  961. lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
  962. struct nvmefc_tgt_fcp_req *rsp)
  963. {
  964. struct lpfc_nvmet_tgtport *tgtp;
  965. struct lpfc_nvmet_rcv_ctx *ctxp =
  966. container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
  967. struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
  968. struct lpfc_hba *phba = ctxp->phba;
  969. lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
  970. ctxp->oxid, ctxp->size, smp_processor_id());
  971. if (!nvmebuf) {
  972. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
  973. "6425 Defer rcv: no buffer xri x%x: "
  974. "flg %x ste %x\n",
  975. ctxp->oxid, ctxp->flag, ctxp->state);
  976. return;
  977. }
  978. tgtp = phba->targetport->private;
  979. if (tgtp)
  980. atomic_inc(&tgtp->rcv_fcp_cmd_defer);
  981. /* Free the nvmebuf since a new buffer already replaced it */
  982. nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
  983. }
  984. static struct nvmet_fc_target_template lpfc_tgttemplate = {
  985. .targetport_delete = lpfc_nvmet_targetport_delete,
  986. .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
  987. .fcp_op = lpfc_nvmet_xmt_fcp_op,
  988. .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
  989. .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
  990. .defer_rcv = lpfc_nvmet_defer_rcv,
  991. .max_hw_queues = 1,
  992. .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
  993. .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
  994. .dma_boundary = 0xFFFFFFFF,
  995. /* optional features */
  996. .target_features = 0,
  997. /* sizes of additional private data for data structures */
  998. .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
  999. };
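/*
 * Note: max_hw_queues, max_sgl_segments and target_features above are
 * placeholders; lpfc_nvmet_create_targetport() overwrites them (from
 * phba->cfg_nvme_io_channel, phba->cfg_nvme_seg_cnt + 1 and
 * NVMET_FCTGTFEAT_READDATA_RSP) before handing the template to
 * nvmet_fc_register_targetport().
 */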
  1000. static void
  1001. __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
  1002. struct lpfc_nvmet_ctx_info *infop)
  1003. {
  1004. struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
  1005. unsigned long flags;
  1006. spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
  1007. list_for_each_entry_safe(ctx_buf, next_ctx_buf,
  1008. &infop->nvmet_ctx_list, list) {
  1009. spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1010. list_del_init(&ctx_buf->list);
  1011. spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1012. __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
  1013. ctx_buf->sglq->state = SGL_FREED;
  1014. ctx_buf->sglq->ndlp = NULL;
  1015. spin_lock(&phba->sli4_hba.sgl_list_lock);
  1016. list_add_tail(&ctx_buf->sglq->list,
  1017. &phba->sli4_hba.lpfc_nvmet_sgl_list);
  1018. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  1019. lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
  1020. kfree(ctx_buf->context);
  1021. }
  1022. spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
  1023. }
  1024. static void
  1025. lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
  1026. {
  1027. struct lpfc_nvmet_ctx_info *infop;
  1028. int i, j;
  1029. /* The first context list, MRQ 0 CPU 0 */
  1030. infop = phba->sli4_hba.nvmet_ctx_info;
  1031. if (!infop)
  1032. return;
  1033. /* Cycle the entire CPU context list for every MRQ */
  1034. for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
  1035. for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
  1036. __lpfc_nvmet_clean_io_for_cpu(phba, infop);
  1037. infop++; /* next */
  1038. }
  1039. }
  1040. kfree(phba->sli4_hba.nvmet_ctx_info);
  1041. phba->sli4_hba.nvmet_ctx_info = NULL;
  1042. }
  1043. static int
  1044. lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
  1045. {
  1046. struct lpfc_nvmet_ctxbuf *ctx_buf;
  1047. struct lpfc_iocbq *nvmewqe;
  1048. union lpfc_wqe128 *wqe;
  1049. struct lpfc_nvmet_ctx_info *last_infop;
  1050. struct lpfc_nvmet_ctx_info *infop;
  1051. int i, j, idx;
  1052. lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
  1053. "6403 Allocate NVMET resources for %d XRIs\n",
  1054. phba->sli4_hba.nvmet_xri_cnt);
  1055. phba->sli4_hba.nvmet_ctx_info = kcalloc(
  1056. phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
  1057. sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
  1058. if (!phba->sli4_hba.nvmet_ctx_info) {
  1059. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1060. "6419 Failed allocate memory for "
  1061. "nvmet context lists\n");
  1062. return -ENOMEM;
  1063. }
  1064. /*
  1065. * Assuming X CPUs in the system, and Y MRQs, allocate some
  1066. * lpfc_nvmet_ctx_info structures as follows:
  1067. *
  1068. * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
  1069. * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
  1070. * ...
  1071. * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
  1072. *
  1073. * Each line represents a MRQ "silo" containing an entry for
  1074. * every CPU.
  1075. *
  1076. * MRQ X is initially assumed to be associated with CPU X, thus
  1077. * contexts are initially distributed across all MRQs using
  1078. * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
  1079. * freed, they are freed to the MRQ silo based on the CPU number
  1080. * of the IO completion. Thus a context that was allocated for MRQ A
  1081. * whose IO completed on CPU B will be freed to cpuB/mrqA.
  1082. */
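/*
 * Layout implied by the kcalloc() above and the loops below: the flat
 * nvmet_ctx_info array is CPU-major, so lpfc_get_ctx_list(phba, cpu, mrq) is
 * expected to resolve to entry (cpu * phba->cfg_nvmet_mrq + mrq).
 */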
  1083. infop = phba->sli4_hba.nvmet_ctx_info;
  1084. for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
  1085. for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
  1086. INIT_LIST_HEAD(&infop->nvmet_ctx_list);
  1087. spin_lock_init(&infop->nvmet_ctx_list_lock);
  1088. infop->nvmet_ctx_list_cnt = 0;
  1089. infop++;
  1090. }
  1091. }
  1092. /*
  1093. * Setup the next CPU context info ptr for each MRQ.
  1094. * MRQ 0 will cycle thru CPUs 0 - X separately from
  1095. * MRQ 1 cycling thru CPUs 0 - X, and so on.
  1096. */
  1097. for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
  1098. last_infop = lpfc_get_ctx_list(phba, 0, j);
  1099. for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
  1100. infop = lpfc_get_ctx_list(phba, i, j);
  1101. infop->nvmet_ctx_next_cpu = last_infop;
  1102. last_infop = infop;
  1103. }
  1104. }
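/*
 * The nvmet_ctx_next_cpu pointers set up above chain the per-CPU lists of
 * each MRQ into a ring (CPU 0 -> 1 -> ... -> X -> 0), presumably so a
 * receive path whose local CPU list is empty can borrow a context from the
 * next CPU's list for the same MRQ.
 */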
  1105. /* For all nvmet xris, allocate resources needed to process a
  1106. * received command on a per xri basis.
  1107. */
  1108. idx = 0;
  1109. for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
  1110. ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
  1111. if (!ctx_buf) {
  1112. lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
  1113. "6404 Ran out of memory for NVMET\n");
  1114. return -ENOMEM;
  1115. }
  1116. ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
  1117. GFP_KERNEL);
  1118. if (!ctx_buf->context) {
  1119. kfree(ctx_buf);
  1120. lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
  1121. "6405 Ran out of NVMET "
  1122. "context memory\n");
  1123. return -ENOMEM;
  1124. }
  1125. ctx_buf->context->ctxbuf = ctx_buf;
  1126. ctx_buf->context->state = LPFC_NVMET_STE_FREE;
  1127. ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
  1128. if (!ctx_buf->iocbq) {
  1129. kfree(ctx_buf->context);
  1130. kfree(ctx_buf);
  1131. lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
  1132. "6406 Ran out of NVMET iocb/WQEs\n");
  1133. return -ENOMEM;
  1134. }
  1135. ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
  1136. nvmewqe = ctx_buf->iocbq;
  1137. wqe = &nvmewqe->wqe;
  1138. /* Initialize WQE */
  1139. memset(wqe, 0, sizeof(union lpfc_wqe));
  1140. ctx_buf->iocbq->context1 = NULL;
  1141. spin_lock(&phba->sli4_hba.sgl_list_lock);
  1142. ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
  1143. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  1144. if (!ctx_buf->sglq) {
  1145. lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
  1146. kfree(ctx_buf->context);
  1147. kfree(ctx_buf);
  1148. lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
  1149. "6407 Ran out of NVMET XRIs\n");
  1150. return -ENOMEM;
  1151. }
  1152. /*
  1153. * Add ctx to MRQidx context list. Our initial assumption
  1154. * is MRQidx will be associated with CPUidx. This association
  1155. * can change on the fly.
  1156. */
  1157. infop = lpfc_get_ctx_list(phba, idx, idx);
  1158. spin_lock(&infop->nvmet_ctx_list_lock);
  1159. list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
  1160. infop->nvmet_ctx_list_cnt++;
  1161. spin_unlock(&infop->nvmet_ctx_list_lock);
  1162. /* Spread ctx structures evenly across all MRQs */
  1163. idx++;
  1164. if (idx >= phba->cfg_nvmet_mrq)
  1165. idx = 0;
  1166. }
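/* Illustrative sketch only: the idx++ / wrap-around above is simply a
 * modulo spread, i.e. context i starts life on MRQ (i % cfg_nvmet_mrq).
 * The counts below are made up.
 */
#include <stdio.h>

int main(void)
{
	enum { NR_CTX = 8, NR_MRQ = 3 };
	int i;

	for (i = 0; i < NR_CTX; i++)
		printf("ctx %d -> mrq %d\n", i, i % NR_MRQ);
	return 0;
}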
  1167. for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
  1168. for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
  1169. infop = lpfc_get_ctx_list(phba, i, j);
  1170. lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
  1171. "6408 TOTAL NVMET ctx for CPU %d "
  1172. "MRQ %d: cnt %d nextcpu %p\n",
  1173. i, j, infop->nvmet_ctx_list_cnt,
  1174. infop->nvmet_ctx_next_cpu);
  1175. }
  1176. }
  1177. return 0;
  1178. }
  1179. int
  1180. lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
  1181. {
  1182. struct lpfc_vport *vport = phba->pport;
  1183. struct lpfc_nvmet_tgtport *tgtp;
  1184. struct nvmet_fc_port_info pinfo;
  1185. int error;
  1186. if (phba->targetport)
  1187. return 0;
  1188. error = lpfc_nvmet_setup_io_context(phba);
  1189. if (error)
  1190. return error;
  1191. memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
  1192. pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
  1193. pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
  1194. pinfo.port_id = vport->fc_myDID;
  1195. /* Limit to LPFC_MAX_NVME_SEG_CNT.
  1196. * For now, + 1 is needed to get around NVME transport logic.
  1197. */
  1198. if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
  1199. lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
  1200. "6400 Reducing sg segment cnt to %d\n",
  1201. LPFC_MAX_NVME_SEG_CNT);
  1202. phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
  1203. } else {
  1204. phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
  1205. }
  1206. lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
  1207. lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
  1208. lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
  1209. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  1210. error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
  1211. &phba->pcidev->dev,
  1212. &phba->targetport);
  1213. #else
  1214. error = -ENOENT;
  1215. #endif
  1216. if (error) {
  1217. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
  1218. "6025 Cannot register NVME targetport x%x: "
  1219. "portnm %llx nodenm %llx segs %d qs %d\n",
  1220. error,
  1221. pinfo.port_name, pinfo.node_name,
  1222. lpfc_tgttemplate.max_sgl_segments,
  1223. lpfc_tgttemplate.max_hw_queues);
  1224. phba->targetport = NULL;
  1225. phba->nvmet_support = 0;
  1226. lpfc_nvmet_cleanup_io_context(phba);
  1227. } else {
  1228. tgtp = (struct lpfc_nvmet_tgtport *)
  1229. phba->targetport->private;
  1230. tgtp->phba = phba;
  1231. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
  1232. "6026 Registered NVME "
  1233. "targetport: %p, private %p "
  1234. "portnm %llx nodenm %llx segs %d qs %d\n",
  1235. phba->targetport, tgtp,
  1236. pinfo.port_name, pinfo.node_name,
  1237. lpfc_tgttemplate.max_sgl_segments,
  1238. lpfc_tgttemplate.max_hw_queues);
  1239. atomic_set(&tgtp->rcv_ls_req_in, 0);
  1240. atomic_set(&tgtp->rcv_ls_req_out, 0);
  1241. atomic_set(&tgtp->rcv_ls_req_drop, 0);
  1242. atomic_set(&tgtp->xmt_ls_abort, 0);
  1243. atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
  1244. atomic_set(&tgtp->xmt_ls_rsp, 0);
  1245. atomic_set(&tgtp->xmt_ls_drop, 0);
  1246. atomic_set(&tgtp->xmt_ls_rsp_error, 0);
  1247. atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
  1248. atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
  1249. atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
  1250. atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
  1251. atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
  1252. atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
  1253. atomic_set(&tgtp->xmt_fcp_drop, 0);
  1254. atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
  1255. atomic_set(&tgtp->xmt_fcp_read, 0);
  1256. atomic_set(&tgtp->xmt_fcp_write, 0);
  1257. atomic_set(&tgtp->xmt_fcp_rsp, 0);
  1258. atomic_set(&tgtp->xmt_fcp_release, 0);
  1259. atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
  1260. atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
  1261. atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
  1262. atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
  1263. atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
  1264. atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
  1265. atomic_set(&tgtp->xmt_fcp_abort, 0);
  1266. atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
  1267. atomic_set(&tgtp->xmt_abort_unsol, 0);
  1268. atomic_set(&tgtp->xmt_abort_sol, 0);
  1269. atomic_set(&tgtp->xmt_abort_rsp, 0);
  1270. atomic_set(&tgtp->xmt_abort_rsp_error, 0);
  1271. atomic_set(&tgtp->defer_ctx, 0);
  1272. atomic_set(&tgtp->defer_fod, 0);
  1273. atomic_set(&tgtp->defer_wqfull, 0);
  1274. }
  1275. return error;
  1276. }
  1277. int
  1278. lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
  1279. {
  1280. struct lpfc_vport *vport = phba->pport;
  1281. if (!phba->targetport)
  1282. return 0;
  1283. lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
  1284. "6007 Update NVMET port %p did x%x\n",
  1285. phba->targetport, vport->fc_myDID);
  1286. phba->targetport->port_id = vport->fc_myDID;
  1287. return 0;
  1288. }
  1289. /**
  1290. * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
  1291. * @phba: pointer to lpfc hba data structure.
  1292. * @axri: pointer to the nvmet xri abort wcqe structure.
  1293. *
  1294. * This routine is invoked by the worker thread to process a SLI4 fast-path
  1295. * NVMET aborted xri.
  1296. **/
  1297. void
  1298. lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
  1299. struct sli4_wcqe_xri_aborted *axri)
  1300. {
  1301. uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
  1302. uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
  1303. struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
  1304. struct lpfc_nvmet_tgtport *tgtp;
  1305. struct lpfc_nodelist *ndlp;
  1306. unsigned long iflag = 0;
  1307. int rrq_empty = 0;
  1308. bool released = false;
  1309. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  1310. "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
  1311. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
  1312. return;
  1313. if (phba->targetport) {
  1314. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  1315. atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
  1316. }
  1317. spin_lock_irqsave(&phba->hbalock, iflag);
  1318. spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1319. list_for_each_entry_safe(ctxp, next_ctxp,
  1320. &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
  1321. list) {
  1322. if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
  1323. continue;
  1324. /* Check if we already received a free context call
  1325. * and we have completed processing an abort situation.
  1326. */
  1327. if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
  1328. !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
  1329. list_del(&ctxp->list);
  1330. released = true;
  1331. }
  1332. ctxp->flag &= ~LPFC_NVMET_XBUSY;
  1333. spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1334. rrq_empty = list_empty(&phba->active_rrq_list);
  1335. spin_unlock_irqrestore(&phba->hbalock, iflag);
  1336. ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
  1337. if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
  1338. (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
  1339. ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
  1340. lpfc_set_rrq_active(phba, ndlp,
  1341. ctxp->ctxbuf->sglq->sli4_lxritag,
  1342. rxid, 1);
  1343. lpfc_sli4_abts_err_handler(phba, ndlp, axri);
  1344. }
  1345. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  1346. "6318 XB aborted oxid %x flg x%x (%x)\n",
  1347. ctxp->oxid, ctxp->flag, released);
  1348. if (released)
  1349. lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
  1350. if (rrq_empty)
  1351. lpfc_worker_wake_up(phba);
  1352. return;
  1353. }
  1354. spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1355. spin_unlock_irqrestore(&phba->hbalock, iflag);
  1356. }
  1357. int
  1358. lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
  1359. struct fc_frame_header *fc_hdr)
  1360. {
  1361. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  1362. struct lpfc_hba *phba = vport->phba;
  1363. struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
  1364. struct nvmefc_tgt_fcp_req *rsp;
  1365. uint16_t xri;
  1366. unsigned long iflag = 0;
  1367. xri = be16_to_cpu(fc_hdr->fh_ox_id);
  1368. spin_lock_irqsave(&phba->hbalock, iflag);
  1369. spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1370. list_for_each_entry_safe(ctxp, next_ctxp,
  1371. &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
  1372. list) {
  1373. if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
  1374. continue;
  1375. spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1376. spin_unlock_irqrestore(&phba->hbalock, iflag);
  1377. spin_lock_irqsave(&ctxp->ctxlock, iflag);
  1378. ctxp->flag |= LPFC_NVMET_ABTS_RCV;
  1379. spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
  1380. lpfc_nvmeio_data(phba,
  1381. "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
  1382. xri, smp_processor_id(), 0);
  1383. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  1384. "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
  1385. rsp = &ctxp->ctx.fcp_req;
  1386. nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
  1387. /* Respond with BA_ACC accordingly */
  1388. lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
  1389. return 0;
  1390. }
  1391. spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  1392. spin_unlock_irqrestore(&phba->hbalock, iflag);
  1393. lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
  1394. xri, smp_processor_id(), 1);
  1395. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  1396. "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
  1397. /* Respond with BA_RJT accordingly */
  1398. lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
  1399. #endif
  1400. return 0;
  1401. }
  1402. static void
  1403. lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
  1404. struct lpfc_nvmet_rcv_ctx *ctxp)
  1405. {
  1406. struct lpfc_sli_ring *pring;
  1407. struct lpfc_iocbq *nvmewqeq;
  1408. struct lpfc_iocbq *next_nvmewqeq;
  1409. unsigned long iflags;
  1410. struct lpfc_wcqe_complete wcqe;
  1411. struct lpfc_wcqe_complete *wcqep;
  1412. pring = wq->pring;
  1413. wcqep = &wcqe;
  1414. /* Fake an ABORT error code back to cmpl routine */
  1415. memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
  1416. bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
  1417. wcqep->parameter = IOERR_ABORT_REQUESTED;
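/* Every WQE flushed below is completed through lpfc_nvmet_xmt_fcp_op_cmp()
 * with this faked LOCAL_REJECT / ABORT_REQUESTED status.
 */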
  1418. spin_lock_irqsave(&pring->ring_lock, iflags);
  1419. list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
  1420. &wq->wqfull_list, list) {
  1421. if (ctxp) {
  1422. /* Checking for a specific IO to flush */
  1423. if (nvmewqeq->context2 == ctxp) {
  1424. list_del(&nvmewqeq->list);
  1425. spin_unlock_irqrestore(&pring->ring_lock,
  1426. iflags);
  1427. lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
  1428. wcqep);
  1429. return;
  1430. }
  1431. continue;
  1432. } else {
  1433. /* Flush all IOs */
  1434. list_del(&nvmewqeq->list);
  1435. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  1436. lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
  1437. spin_lock_irqsave(&pring->ring_lock, iflags);
  1438. }
  1439. }
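/* Only clear the WQFULL flag when every queued WQE was flushed
 * (ctxp == NULL), not for a single-IO flush.
 */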
  1440. if (!ctxp)
  1441. wq->q_flag &= ~HBA_NVMET_WQFULL;
  1442. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  1443. }
  1444. void
  1445. lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
  1446. struct lpfc_queue *wq)
  1447. {
  1448. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  1449. struct lpfc_sli_ring *pring;
  1450. struct lpfc_iocbq *nvmewqeq;
  1451. unsigned long iflags;
  1452. int rc;
  1453. /*
  1454. * Some WQE slots are available, so try to re-issue anything
  1455. * on the WQ wqfull_list.
  1456. */
  1457. pring = wq->pring;
  1458. spin_lock_irqsave(&pring->ring_lock, iflags);
  1459. while (!list_empty(&wq->wqfull_list)) {
  1460. list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
  1461. list);
  1462. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  1463. rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
  1464. spin_lock_irqsave(&pring->ring_lock, iflags);
  1465. if (rc == -EBUSY) {
  1466. /* WQ was full again, so put it back on the list */
  1467. list_add(&nvmewqeq->list, &wq->wqfull_list);
  1468. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  1469. return;
  1470. }
  1471. }
  1472. wq->q_flag &= ~HBA_NVMET_WQFULL;
  1473. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  1474. #endif
  1475. }
  1476. void
  1477. lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
  1478. {
  1479. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  1480. struct lpfc_nvmet_tgtport *tgtp;
  1481. struct lpfc_queue *wq;
  1482. uint32_t qidx;
  1483. DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
  1484. if (phba->nvmet_support == 0)
  1485. return;
  1486. if (phba->targetport) {
  1487. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  1488. for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
  1489. wq = phba->sli4_hba.nvme_wq[qidx];
  1490. lpfc_nvmet_wqfull_flush(phba, wq, NULL);
  1491. }
  1492. tgtp->tport_unreg_cmp = &tport_unreg_cmp;
  1493. nvmet_fc_unregister_targetport(phba->targetport);
  1494. if (!wait_for_completion_timeout(&tport_unreg_cmp,
  1495. msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
  1496. lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
  1497. "6179 Unreg targetport %p timeout "
  1498. "reached.\n", phba->targetport);
  1499. lpfc_nvmet_cleanup_io_context(phba);
  1500. }
  1501. phba->targetport = NULL;
  1502. #endif
  1503. }
  1504. /**
  1505. * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
  1506. * @phba: pointer to lpfc hba data structure.
  1507. * @pring: pointer to a SLI ring.
  1508. * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
  1509. *
  1510. * This routine is used for processing the WQE associated with an
  1511. * unsolicited NVME LS event. It allocates a receive context for the LS
  1512. * exchange, saves the OX_ID, S_ID and payload size, and hands the LS
  1513. * payload to the NVME target transport via nvmet_fc_rcv_ls_req(). If the
  1514. * transport cannot accept the request, the buffer is freed and an ABTS
  1515. * is issued for the exchange.
  1516. **/
  1517. static void
  1518. lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  1519. struct hbq_dmabuf *nvmebuf)
  1520. {
  1521. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  1522. struct lpfc_nvmet_tgtport *tgtp;
  1523. struct fc_frame_header *fc_hdr;
  1524. struct lpfc_nvmet_rcv_ctx *ctxp;
  1525. uint32_t *payload;
  1526. uint32_t size, oxid, sid, rc;
  1527. fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
  1528. oxid = be16_to_cpu(fc_hdr->fh_ox_id);
  1529. if (!phba->targetport) {
  1530. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  1531. "6154 LS Drop IO x%x\n", oxid);
  1532. oxid = 0;
  1533. size = 0;
  1534. sid = 0;
  1535. ctxp = NULL;
  1536. goto dropit;
  1537. }
  1538. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  1539. payload = (uint32_t *)(nvmebuf->dbuf.virt);
  1540. size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
  1541. sid = sli4_sid_from_fc_hdr(fc_hdr);
  1542. ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
  1543. if (ctxp == NULL) {
  1544. atomic_inc(&tgtp->rcv_ls_req_drop);
  1545. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  1546. "6155 LS Drop IO x%x: Alloc\n",
  1547. oxid);
  1548. dropit:
  1549. lpfc_nvmeio_data(phba, "NVMET LS DROP: "
  1550. "xri x%x sz %d from %06x\n",
  1551. oxid, size, sid);
  1552. lpfc_in_buf_free(phba, &nvmebuf->dbuf);
  1553. return;
  1554. }
  1555. ctxp->phba = phba;
  1556. ctxp->size = size;
  1557. ctxp->oxid = oxid;
  1558. ctxp->sid = sid;
  1559. ctxp->wqeq = NULL;
  1560. ctxp->state = LPFC_NVMET_STE_LS_RCV;
  1561. ctxp->entry_cnt = 1;
  1562. ctxp->rqb_buffer = (void *)nvmebuf;
  1563. lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
  1564. oxid, size, sid);
  1565. /*
  1566. * The calling sequence should be:
  1567. * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
  1568. * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
  1569. */
  1570. atomic_inc(&tgtp->rcv_ls_req_in);
  1571. rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
  1572. payload, size);
  1573. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
  1574. "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
  1575. "%08x %08x %08x\n", size, rc,
  1576. *payload, *(payload+1), *(payload+2),
  1577. *(payload+3), *(payload+4), *(payload+5));
  1578. if (rc == 0) {
  1579. atomic_inc(&tgtp->rcv_ls_req_out);
  1580. return;
  1581. }
  1582. lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
  1583. oxid, size, sid);
  1584. atomic_inc(&tgtp->rcv_ls_req_drop);
  1585. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  1586. "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
  1587. ctxp->oxid, rc);
  1588. /* We assume a received cmd ALWAYS fits into 1 buffer */
  1589. lpfc_in_buf_free(phba, &nvmebuf->dbuf);
  1590. atomic_inc(&tgtp->xmt_ls_abort);
  1591. lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
  1592. #endif
  1593. }
  1594. static struct lpfc_nvmet_ctxbuf *
  1595. lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
  1596. struct lpfc_nvmet_ctx_info *current_infop)
  1597. {
  1598. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  1599. struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
  1600. struct lpfc_nvmet_ctx_info *get_infop;
  1601. int i;
  1602. /*
  1603. * The current_infop for the MRQ an NVME command IU was received
  1604. * on is empty. Our goal is to replenish this MRQ's context
  1605. * list from another CPU's list.
  1606. *
  1607. * First we need to pick a context list to start looking on.
  1608. * nvmet_ctx_start_cpu is the CPU that had available contexts the
  1609. * last time we needed to replenish this MRQ, while nvmet_ctx_next_cpu
  1610. * is just the next sequential CPU for this MRQ.
  1611. */
  1612. if (current_infop->nvmet_ctx_start_cpu)
  1613. get_infop = current_infop->nvmet_ctx_start_cpu;
  1614. else
  1615. get_infop = current_infop->nvmet_ctx_next_cpu;
  1616. for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
  1617. if (get_infop == current_infop) {
  1618. get_infop = get_infop->nvmet_ctx_next_cpu;
  1619. continue;
  1620. }
  1621. spin_lock(&get_infop->nvmet_ctx_list_lock);
  1622. /* Just take the entire context list, if there are any */
  1623. if (get_infop->nvmet_ctx_list_cnt) {
  1624. list_splice_init(&get_infop->nvmet_ctx_list,
  1625. &current_infop->nvmet_ctx_list);
  1626. current_infop->nvmet_ctx_list_cnt =
  1627. get_infop->nvmet_ctx_list_cnt - 1;
  1628. get_infop->nvmet_ctx_list_cnt = 0;
  1629. spin_unlock(&get_infop->nvmet_ctx_list_lock);
  1630. current_infop->nvmet_ctx_start_cpu = get_infop;
  1631. list_remove_head(&current_infop->nvmet_ctx_list,
  1632. ctx_buf, struct lpfc_nvmet_ctxbuf,
  1633. list);
  1634. return ctx_buf;
  1635. }
  1636. /* Otherwise, move on to the next CPU for this MRQ */
  1637. spin_unlock(&get_infop->nvmet_ctx_list_lock);
  1638. get_infop = get_infop->nvmet_ctx_next_cpu;
  1639. }
  1640. #endif
  1641. /* Nothing found, all contexts for the MRQ are in-flight */
  1642. return NULL;
  1643. }
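/* Illustrative sketch only: the accounting behind the "- 1" above. The
 * donor CPU's entire stash is spliced over, one ctx_buf is popped off and
 * handed straight back to the caller, so the receiving (previously empty)
 * MRQ list keeps donor_cnt - 1 entries. Plain C, no kernel list API.
 */
#include <assert.h>

int main(void)
{
	int donor_cnt = 5;	/* contexts sitting on another CPU's list */
	int current_cnt = 0;	/* this MRQ/CPU list is empty - why we are here */

	current_cnt += donor_cnt;	/* list_splice_init()             */
	donor_cnt = 0;
	current_cnt -= 1;		/* list_remove_head() returns one */

	assert(donor_cnt == 0 && current_cnt == 4);
	return 0;
}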
  1644. /**
  1645. * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
  1646. * @phba: pointer to lpfc hba data structure.
  1647. * @idx: relative index of MRQ vector
  1648. * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
  1649. *
  1650. * This routine is used for processing the WQE associated with an
  1651. * unsolicited NVME FCP command. It pulls a receive context from the
  1652. * per-CPU list for this MRQ (replenishing it from another CPU if needed),
  1653. * initializes the context, and hands the command IU to the NVME target
  1654. * transport via nvmet_fc_rcv_fcp_req(). If no context is available, the
  1655. * command is queued on the io_wait list until one is freed.
  1656. **/
  1657. static void
  1658. lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
  1659. uint32_t idx,
  1660. struct rqb_dmabuf *nvmebuf,
  1661. uint64_t isr_timestamp)
  1662. {
  1663. struct lpfc_nvmet_rcv_ctx *ctxp;
  1664. struct lpfc_nvmet_tgtport *tgtp;
  1665. struct fc_frame_header *fc_hdr;
  1666. struct lpfc_nvmet_ctxbuf *ctx_buf;
  1667. struct lpfc_nvmet_ctx_info *current_infop;
  1668. uint32_t *payload;
  1669. uint32_t size, oxid, sid, rc, qno;
  1670. unsigned long iflag;
  1671. int current_cpu;
  1672. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  1673. uint32_t id;
  1674. #endif
  1675. if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
  1676. return;
  1677. ctx_buf = NULL;
  1678. if (!nvmebuf || !phba->targetport) {
  1679. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  1680. "6157 NVMET FCP Drop IO\n");
  1681. oxid = 0;
  1682. size = 0;
  1683. sid = 0;
  1684. ctxp = NULL;
  1685. goto dropit;
  1686. }
  1687. /*
  1688. * Get a pointer to the context list for this MRQ based on
  1689. * the CPU this MRQ IRQ is associated with. If the CPU association
  1690. * changes from our initial assumption, the context list could
  1691. * be empty, thus it would need to be replenished with the
  1692. * context list from another CPU for this MRQ.
  1693. */
  1694. current_cpu = smp_processor_id();
  1695. current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
  1696. spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
  1697. if (current_infop->nvmet_ctx_list_cnt) {
  1698. list_remove_head(&current_infop->nvmet_ctx_list,
  1699. ctx_buf, struct lpfc_nvmet_ctxbuf, list);
  1700. current_infop->nvmet_ctx_list_cnt--;
  1701. } else {
  1702. ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
  1703. }
  1704. spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
  1705. fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
  1706. oxid = be16_to_cpu(fc_hdr->fh_ox_id);
  1707. size = nvmebuf->bytes_recv;
  1708. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  1709. if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
  1710. id = smp_processor_id();
  1711. if (id < LPFC_CHECK_CPU_CNT)
  1712. phba->cpucheck_rcv_io[id]++;
  1713. }
  1714. #endif
  1715. lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
  1716. oxid, size, smp_processor_id());
  1717. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  1718. if (!ctx_buf) {
  1719. /* Queue this NVME IO to process later */
  1720. spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
  1721. list_add_tail(&nvmebuf->hbuf.list,
  1722. &phba->sli4_hba.lpfc_nvmet_io_wait_list);
  1723. phba->sli4_hba.nvmet_io_wait_cnt++;
  1724. phba->sli4_hba.nvmet_io_wait_total++;
  1725. spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
  1726. iflag);
  1727. /* Post a brand new DMA buffer to RQ */
  1728. qno = nvmebuf->idx;
  1729. lpfc_post_rq_buffer(
  1730. phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
  1731. phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
  1732. atomic_inc(&tgtp->defer_ctx);
  1733. return;
  1734. }
  1735. payload = (uint32_t *)(nvmebuf->dbuf.virt);
  1736. sid = sli4_sid_from_fc_hdr(fc_hdr);
  1737. ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
  1738. if (ctxp->state != LPFC_NVMET_STE_FREE) {
  1739. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  1740. "6414 NVMET Context corrupt %d %d oxid x%x\n",
  1741. ctxp->state, ctxp->entry_cnt, ctxp->oxid);
  1742. }
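/* (Re)initialize the receive context before handing the command to the
 * NVME target transport.
 */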
  1743. ctxp->wqeq = NULL;
  1744. ctxp->txrdy = NULL;
  1745. ctxp->offset = 0;
  1746. ctxp->phba = phba;
  1747. ctxp->size = size;
  1748. ctxp->oxid = oxid;
  1749. ctxp->sid = sid;
  1750. ctxp->idx = idx;
  1751. ctxp->state = LPFC_NVMET_STE_RCV;
  1752. ctxp->entry_cnt = 1;
  1753. ctxp->flag = 0;
  1754. ctxp->ctxbuf = ctx_buf;
  1755. ctxp->rqb_buffer = (void *)nvmebuf;
  1756. spin_lock_init(&ctxp->ctxlock);
  1757. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  1758. if (isr_timestamp) {
  1759. ctxp->ts_isr_cmd = isr_timestamp;
  1760. ctxp->ts_cmd_nvme = ktime_get_ns();
  1761. ctxp->ts_nvme_data = 0;
  1762. ctxp->ts_data_wqput = 0;
  1763. ctxp->ts_isr_data = 0;
  1764. ctxp->ts_data_nvme = 0;
  1765. ctxp->ts_nvme_status = 0;
  1766. ctxp->ts_status_wqput = 0;
  1767. ctxp->ts_isr_status = 0;
  1768. ctxp->ts_status_nvme = 0;
  1769. } else {
  1770. ctxp->ts_cmd_nvme = 0;
  1771. }
  1772. #endif
  1773. atomic_inc(&tgtp->rcv_fcp_cmd_in);
  1774. /*
  1775. * The calling sequence should be:
  1776. * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
  1777. * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
  1778. * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
  1779. * the NVME command / FC header is stored, so we are free to repost
  1780. * the buffer.
  1781. */
  1782. rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
  1783. payload, size);
  1784. /* Process FCP command */
  1785. if (rc == 0) {
  1786. ctxp->rqb_buffer = NULL;
  1787. atomic_inc(&tgtp->rcv_fcp_cmd_out);
  1788. lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
  1789. return;
  1790. }
  1791. /* Processing of FCP command is deferred */
  1792. if (rc == -EOVERFLOW) {
  1793. /*
  1794. * Post a brand new DMA buffer to RQ and defer
  1795. * freeing rcv buffer till .defer_rcv callback
  1796. */
  1797. qno = nvmebuf->idx;
  1798. lpfc_post_rq_buffer(
  1799. phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
  1800. phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
  1801. lpfc_nvmeio_data(phba,
  1802. "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
  1803. oxid, size, sid);
  1804. atomic_inc(&tgtp->rcv_fcp_cmd_out);
  1805. atomic_inc(&tgtp->defer_fod);
  1806. return;
  1807. }
  1808. ctxp->rqb_buffer = nvmebuf;
  1809. atomic_inc(&tgtp->rcv_fcp_cmd_drop);
  1810. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  1811. "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
  1812. ctxp->oxid, rc,
  1813. atomic_read(&tgtp->rcv_fcp_cmd_in),
  1814. atomic_read(&tgtp->rcv_fcp_cmd_out),
  1815. atomic_read(&tgtp->xmt_fcp_release));
  1816. dropit:
  1817. lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
  1818. oxid, size, sid);
  1819. if (oxid) {
  1820. lpfc_nvmet_defer_release(phba, ctxp);
  1821. lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
  1822. lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
  1823. return;
  1824. }
  1825. if (ctx_buf)
  1826. lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
  1827. if (nvmebuf)
  1828. lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
  1829. }
  1830. /**
  1831. * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
  1832. * @phba: pointer to lpfc hba data structure.
  1833. * @pring: pointer to a SLI ring.
  1834. * @piocb: pointer to the driver iocbq carrying the received LS buffer.
  1835. *
  1836. * This routine is used to process an unsolicited event received from a SLI
  1837. * (Service Level Interface) ring. The actual processing of the data buffer
  1838. * associated with the unsolicited event is done by invoking the routine
  1839. * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
  1840. * SLI RQ on which the unsolicited event was received.
  1841. **/
  1842. void
  1843. lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  1844. struct lpfc_iocbq *piocb)
  1845. {
  1846. struct lpfc_dmabuf *d_buf;
  1847. struct hbq_dmabuf *nvmebuf;
  1848. d_buf = piocb->context2;
  1849. nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  1850. if (phba->nvmet_support == 0) {
  1851. lpfc_in_buf_free(phba, &nvmebuf->dbuf);
  1852. return;
  1853. }
  1854. lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
  1855. }
  1856. /**
  1857. * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
  1858. * @phba: pointer to lpfc hba data structure.
  1859. * @idx: relative index of MRQ vector
  1860. * @nvmebuf: pointer to received nvme data structure.
  1861. *
  1862. * This routine is used to process an unsolicited event received from a SLI
  1863. * (Service Level Interface) ring. The actual processing of the data buffer
  1864. * associated with the unsolicited event is done by invoking the routine
  1865. * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
  1866. * SLI RQ on which the unsolicited event was received.
  1867. **/
  1868. void
  1869. lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
  1870. uint32_t idx,
  1871. struct rqb_dmabuf *nvmebuf,
  1872. uint64_t isr_timestamp)
  1873. {
  1874. if (phba->nvmet_support == 0) {
  1875. lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
  1876. return;
  1877. }
  1878. lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
  1879. isr_timestamp);
  1880. }
  1881. /**
  1882. * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
  1883. * @phba: pointer to a host N_Port data structure.
  1884. * @ctxp: Context info for NVME LS Request
  1885. * @rspbuf: DMA buffer of NVME command.
  1886. * @rspsize: size of the NVME command.
  1887. *
  1888. * This routine is used for allocating a lpfc-WQE data structure from
  1889. * the driver lpfc-WQE free-list and preparing the WQE with the parameters
  1890. * passed into the routine, so that an NVME LS response can be
  1891. * transmitted on the exchange described by @ctxp. It builds an
  1892. * XMIT_SEQUENCE64 WQE whose single Buffer Descriptor Entry (BDE) points
  1893. * at the response payload supplied by the caller, and fills in the
  1894. * exchange (XRI), node (RPI) and OX_ID tags along with the length,
  1895. * class and completion-queue routing used for NVME LS traffic. No
  1896. * additional payload buffers are allocated here. The reference count on the
  1898. * ndlp is incremented by 1 and the reference to the ndlp is put into
  1899. * context1 of the WQE data structure for this WQE to hold the ndlp
  1900. * reference for the command's callback function to access later.
  1901. *
  1902. * Return code
  1903. * Pointer to the newly allocated/prepared nvme wqe data structure
  1904. * NULL - when nvme wqe data structure allocation/preparation failed
  1905. **/
  1906. static struct lpfc_iocbq *
  1907. lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
  1908. struct lpfc_nvmet_rcv_ctx *ctxp,
  1909. dma_addr_t rspbuf, uint16_t rspsize)
  1910. {
  1911. struct lpfc_nodelist *ndlp;
  1912. struct lpfc_iocbq *nvmewqe;
  1913. union lpfc_wqe128 *wqe;
  1914. if (!lpfc_is_link_up(phba)) {
  1915. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
  1916. "6104 NVMET prep LS wqe: link err: "
  1917. "NPORT x%x oxid:x%x ste %d\n",
  1918. ctxp->sid, ctxp->oxid, ctxp->state);
  1919. return NULL;
  1920. }
  1921. /* Allocate buffer for command wqe */
  1922. nvmewqe = lpfc_sli_get_iocbq(phba);
  1923. if (nvmewqe == NULL) {
  1924. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
  1925. "6105 NVMET prep LS wqe: No WQE: "
  1926. "NPORT x%x oxid x%x ste %d\n",
  1927. ctxp->sid, ctxp->oxid, ctxp->state);
  1928. return NULL;
  1929. }
  1930. ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
  1931. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
  1932. ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
  1933. (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
  1934. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
  1935. "6106 NVMET prep LS wqe: No ndlp: "
  1936. "NPORT x%x oxid x%x ste %d\n",
  1937. ctxp->sid, ctxp->oxid, ctxp->state);
  1938. goto nvme_wqe_free_wqeq_exit;
  1939. }
  1940. ctxp->wqeq = nvmewqe;
  1941. /* prevent preparing wqe with NULL ndlp reference */
  1942. nvmewqe->context1 = lpfc_nlp_get(ndlp);
  1943. if (nvmewqe->context1 == NULL)
  1944. goto nvme_wqe_free_wqeq_exit;
  1945. nvmewqe->context2 = ctxp;
  1946. wqe = &nvmewqe->wqe;
  1947. memset(wqe, 0, sizeof(union lpfc_wqe));
  1948. /* Words 0 - 2 */
  1949. wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  1950. wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
  1951. wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
  1952. wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
  1953. /* Word 3 */
  1954. /* Word 4 */
  1955. /* Word 5 */
  1956. bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
  1957. bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
  1958. bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
  1959. bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
  1960. bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
  1961. /* Word 6 */
  1962. bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
  1963. phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  1964. bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
  1965. /* Word 7 */
  1966. bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
  1967. CMD_XMIT_SEQUENCE64_WQE);
  1968. bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
  1969. bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
  1970. bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
  1971. /* Word 8 */
  1972. wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
  1973. /* Word 9 */
  1974. bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
  1975. /* Needs to be set by caller */
  1976. bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
  1977. /* Word 10 */
  1978. bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
  1979. bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
  1980. bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
  1981. LPFC_WQE_LENLOC_WORD12);
  1982. bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
  1983. /* Word 11 */
  1984. bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
  1985. LPFC_WQE_CQ_ID_DEFAULT);
  1986. bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
  1987. OTHER_COMMAND);
  1988. /* Word 12 */
  1989. wqe->xmit_sequence.xmit_len = rspsize;
  1990. nvmewqe->retry = 1;
  1991. nvmewqe->vport = phba->pport;
  1992. nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
  1993. nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
  1994. /* Xmit NVMET response to remote NPORT <did> */
  1995. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
  1996. "6039 Xmit NVMET LS response to remote "
  1997. "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
  1998. ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
  1999. rspsize);
  2000. return nvmewqe;
  2001. nvme_wqe_free_wqeq_exit:
  2002. nvmewqe->context2 = NULL;
  2003. nvmewqe->context3 = NULL;
  2004. lpfc_sli_release_iocbq(phba, nvmewqe);
  2005. return NULL;
  2006. }
  2007. static struct lpfc_iocbq *
  2008. lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
  2009. struct lpfc_nvmet_rcv_ctx *ctxp)
  2010. {
  2011. struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
  2012. struct lpfc_nvmet_tgtport *tgtp;
  2013. struct sli4_sge *sgl;
  2014. struct lpfc_nodelist *ndlp;
  2015. struct lpfc_iocbq *nvmewqe;
  2016. struct scatterlist *sgel;
  2017. union lpfc_wqe128 *wqe;
  2018. struct ulp_bde64 *bde;
  2019. uint32_t *txrdy;
  2020. dma_addr_t physaddr;
  2021. int i, cnt;
  2022. int do_pbde;
  2023. int xc = 1;
  2024. if (!lpfc_is_link_up(phba)) {
  2025. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  2026. "6107 NVMET prep FCP wqe: link err:"
  2027. "NPORT x%x oxid x%x ste %d\n",
  2028. ctxp->sid, ctxp->oxid, ctxp->state);
  2029. return NULL;
  2030. }
  2031. ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
  2032. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
  2033. ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
  2034. (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
  2035. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  2036. "6108 NVMET prep FCP wqe: no ndlp: "
  2037. "NPORT x%x oxid x%x ste %d\n",
  2038. ctxp->sid, ctxp->oxid, ctxp->state);
  2039. return NULL;
  2040. }
  2041. if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
  2042. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  2043. "6109 NVMET prep FCP wqe: seg cnt err: "
  2044. "NPORT x%x oxid x%x ste %d cnt %d\n",
  2045. ctxp->sid, ctxp->oxid, ctxp->state,
  2046. phba->cfg_nvme_seg_cnt);
  2047. return NULL;
  2048. }
  2049. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  2050. nvmewqe = ctxp->wqeq;
  2051. if (nvmewqe == NULL) {
  2052. /* Allocate buffer for command wqe */
  2053. nvmewqe = ctxp->ctxbuf->iocbq;
  2054. if (nvmewqe == NULL) {
  2055. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  2056. "6110 NVMET prep FCP wqe: No "
  2057. "WQE: NPORT x%x oxid x%x ste %d\n",
  2058. ctxp->sid, ctxp->oxid, ctxp->state);
  2059. return NULL;
  2060. }
  2061. ctxp->wqeq = nvmewqe;
  2062. xc = 0; /* create new XRI */
  2063. nvmewqe->sli4_lxritag = NO_XRI;
  2064. nvmewqe->sli4_xritag = NO_XRI;
  2065. }
  2066. /* Sanity check */
  2067. if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
  2068. (ctxp->entry_cnt == 1)) ||
  2069. (ctxp->state == LPFC_NVMET_STE_DATA)) {
  2070. wqe = &nvmewqe->wqe;
  2071. } else {
  2072. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  2073. "6111 Wrong state NVMET FCP: %d cnt %d\n",
  2074. ctxp->state, ctxp->entry_cnt);
  2075. return NULL;
  2076. }
  2077. sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
  2078. switch (rsp->op) {
  2079. case NVMET_FCOP_READDATA:
  2080. case NVMET_FCOP_READDATA_RSP:
  2081. /* From the tsend template, initialize words 7 - 11 */
  2082. memcpy(&wqe->words[7],
  2083. &lpfc_tsend_cmd_template.words[7],
  2084. sizeof(uint32_t) * 5);
  2085. /* Words 0 - 2 : The first sg segment */
  2086. sgel = &rsp->sg[0];
  2087. physaddr = sg_dma_address(sgel);
  2088. wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  2089. wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
  2090. wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
  2091. wqe->fcp_tsend.bde.addrHigh =
  2092. cpu_to_le32(putPaddrHigh(physaddr));
  2093. /* Word 3 */
  2094. wqe->fcp_tsend.payload_offset_len = 0;
  2095. /* Word 4 */
  2096. wqe->fcp_tsend.relative_offset = ctxp->offset;
  2097. /* Word 5 */
  2098. wqe->fcp_tsend.reserved = 0;
  2099. /* Word 6 */
  2100. bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
  2101. phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  2102. bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
  2103. nvmewqe->sli4_xritag);
  2104. /* Word 7 - set ar later */
  2105. /* Word 8 */
  2106. wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
  2107. /* Word 9 */
  2108. bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
  2109. bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
  2110. /* Word 10 - set wqes later, in template xc=1 */
  2111. if (!xc)
  2112. bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
  2113. /* Word 11 - set sup, irsp, irsplen later */
  2114. do_pbde = 0;
  2115. /* Word 12 */
  2116. wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
  2117. /* Setup 2 SKIP SGEs */
  2118. sgl->addr_hi = 0;
  2119. sgl->addr_lo = 0;
  2120. sgl->word2 = 0;
  2121. bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
  2122. sgl->word2 = cpu_to_le32(sgl->word2);
  2123. sgl->sge_len = 0;
  2124. sgl++;
  2125. sgl->addr_hi = 0;
  2126. sgl->addr_lo = 0;
  2127. sgl->word2 = 0;
  2128. bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
  2129. sgl->word2 = cpu_to_le32(sgl->word2);
  2130. sgl->sge_len = 0;
  2131. sgl++;
  2132. if (rsp->op == NVMET_FCOP_READDATA_RSP) {
  2133. atomic_inc(&tgtp->xmt_fcp_read_rsp);
  2134. /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
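/* A minimal good response may be suppressed entirely when the initiator
 * supports SUPPRESS_RSP; any other response payload is embedded in the
 * WQE itself via wqes/irsp/irsplen below.
 */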
  2135. if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
  2136. if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
  2137. bf_set(wqe_sup,
  2138. &wqe->fcp_tsend.wqe_com, 1);
  2139. } else {
  2140. bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
  2141. bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
  2142. bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
  2143. ((rsp->rsplen >> 2) - 1));
  2144. memcpy(&wqe->words[16], rsp->rspaddr,
  2145. rsp->rsplen);
  2146. }
  2147. } else {
  2148. atomic_inc(&tgtp->xmt_fcp_read);
  2149. /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
  2150. bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
  2151. }
  2152. break;
  2153. case NVMET_FCOP_WRITEDATA:
  2154. /* From the treceive template, initialize words 3 - 11 */
  2155. memcpy(&wqe->words[3],
  2156. &lpfc_treceive_cmd_template.words[3],
  2157. sizeof(uint32_t) * 9);
  2158. /* Words 0 - 2 : The first sg segment */
  2159. txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
  2160. GFP_KERNEL, &physaddr);
  2161. if (!txrdy) {
  2162. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  2163. "6041 Bad txrdy buffer: oxid x%x\n",
  2164. ctxp->oxid);
  2165. return NULL;
  2166. }
  2167. ctxp->txrdy = txrdy;
  2168. ctxp->txrdy_phys = physaddr;
  2169. wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  2170. wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
  2171. wqe->fcp_treceive.bde.addrLow =
  2172. cpu_to_le32(putPaddrLow(physaddr));
  2173. wqe->fcp_treceive.bde.addrHigh =
  2174. cpu_to_le32(putPaddrHigh(physaddr));
  2175. /* Word 4 */
  2176. wqe->fcp_treceive.relative_offset = ctxp->offset;
  2177. /* Word 6 */
  2178. bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
  2179. phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  2180. bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
  2181. nvmewqe->sli4_xritag);
  2182. /* Word 7 */
  2183. /* Word 8 */
  2184. wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
  2185. /* Word 9 */
  2186. bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
  2187. bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
  2188. /* Word 10 - in template xc=1 */
  2189. if (!xc)
  2190. bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
  2191. /* Word 11 - set pbde later */
  2192. if (phba->cfg_enable_pbde) {
  2193. do_pbde = 1;
  2194. } else {
  2195. bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
  2196. do_pbde = 0;
  2197. }
  2198. /* Word 12 */
  2199. wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
  2200. /* Setup 1 TXRDY and 1 SKIP SGE */
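/* FCP XFER_RDY IU layout: word 0 = relative offset (DATA_RO),
 * word 1 = burst length, word 2 = reserved.
 */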
  2201. txrdy[0] = 0;
  2202. txrdy[1] = cpu_to_be32(rsp->transfer_length);
  2203. txrdy[2] = 0;
  2204. sgl->addr_hi = putPaddrHigh(physaddr);
  2205. sgl->addr_lo = putPaddrLow(physaddr);
  2206. sgl->word2 = 0;
  2207. bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
  2208. sgl->word2 = cpu_to_le32(sgl->word2);
  2209. sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
  2210. sgl++;
  2211. sgl->addr_hi = 0;
  2212. sgl->addr_lo = 0;
  2213. sgl->word2 = 0;
  2214. bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
  2215. sgl->word2 = cpu_to_le32(sgl->word2);
  2216. sgl->sge_len = 0;
  2217. sgl++;
  2218. atomic_inc(&tgtp->xmt_fcp_write);
  2219. break;
  2220. case NVMET_FCOP_RSP:
  2221. /* From the trsp template, initialize words 4 - 11 */
  2222. memcpy(&wqe->words[4],
  2223. &lpfc_trsp_cmd_template.words[4],
  2224. sizeof(uint32_t) * 8);
  2225. /* Words 0 - 2 */
  2226. physaddr = rsp->rspdma;
  2227. wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  2228. wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
  2229. wqe->fcp_trsp.bde.addrLow =
  2230. cpu_to_le32(putPaddrLow(physaddr));
  2231. wqe->fcp_trsp.bde.addrHigh =
  2232. cpu_to_le32(putPaddrHigh(physaddr));
  2233. /* Word 3 */
  2234. wqe->fcp_trsp.response_len = rsp->rsplen;
  2235. /* Word 6 */
  2236. bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
  2237. phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  2238. bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
  2239. nvmewqe->sli4_xritag);
  2240. /* Word 7 */
  2241. /* Word 8 */
  2242. wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
  2243. /* Word 9 */
  2244. bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
  2245. bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
  2246. /* Word 10 */
  2247. if (xc)
  2248. bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
  2249. /* Word 11 */
  2250. /* In template wqes=0 irsp=0 irsplen=0 - good response */
  2251. if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
  2252. /* Bad response - embed it */
  2253. bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
  2254. bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
  2255. bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
  2256. ((rsp->rsplen >> 2) - 1));
  2257. memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
  2258. }
  2259. do_pbde = 0;
  2260. /* Word 12 */
  2261. wqe->fcp_trsp.rsvd_12_15[0] = 0;
  2262. /* Use rspbuf, NOT sg list */
  2263. rsp->sg_cnt = 0;
  2264. sgl->word2 = 0;
  2265. atomic_inc(&tgtp->xmt_fcp_rsp);
  2266. break;
  2267. default:
  2268. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
  2269. "6064 Unknown Rsp Op %d\n",
  2270. rsp->op);
  2271. return NULL;
  2272. }
  2273. nvmewqe->retry = 1;
  2274. nvmewqe->vport = phba->pport;
  2275. nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
  2276. nvmewqe->context1 = ndlp;
  2277. for (i = 0; i < rsp->sg_cnt; i++) {
  2278. sgel = &rsp->sg[i];
  2279. physaddr = sg_dma_address(sgel);
  2280. cnt = sg_dma_len(sgel);
  2281. sgl->addr_hi = putPaddrHigh(physaddr);
  2282. sgl->addr_lo = putPaddrLow(physaddr);
  2283. sgl->word2 = 0;
  2284. bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
  2285. bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
  2286. if ((i+1) == rsp->sg_cnt)
  2287. bf_set(lpfc_sli4_sge_last, sgl, 1);
  2288. sgl->word2 = cpu_to_le32(sgl->word2);
  2289. sgl->sge_len = cpu_to_le32(cnt);
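/* For the first data SGE only: if PBDE is enabled, mirror it into WQE
 * words 13-15 as a BDE; otherwise those words are cleared.
 */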
  2290. if (i == 0) {
  2291. bde = (struct ulp_bde64 *)&wqe->words[13];
  2292. if (do_pbde) {
  2293. /* Words 13-15 (PBDE) */
  2294. bde->addrLow = sgl->addr_lo;
  2295. bde->addrHigh = sgl->addr_hi;
  2296. bde->tus.f.bdeSize =
  2297. le32_to_cpu(sgl->sge_len);
  2298. bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  2299. bde->tus.w = cpu_to_le32(bde->tus.w);
  2300. } else {
  2301. memset(bde, 0, sizeof(struct ulp_bde64));
  2302. }
  2303. }
  2304. sgl++;
  2305. ctxp->offset += cnt;
  2306. }
  2307. ctxp->state = LPFC_NVMET_STE_DATA;
  2308. ctxp->entry_cnt++;
  2309. return nvmewqe;
  2310. }
  2311. /**
  2312. * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
  2313. * @phba: Pointer to HBA context object.
  2314. * @cmdwqe: Pointer to driver command WQE object.
  2315. * @wcqe: Pointer to driver response CQE object.
  2316. *
  2317. * The function is called from SLI ring event handler with no
  2318. * lock held. This function is the completion handler for NVME ABTS for FCP cmds
  2319. * The function frees memory resources used for the NVME commands.
  2320. **/
  2321. static void
  2322. lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
  2323. struct lpfc_wcqe_complete *wcqe)
  2324. {
  2325. struct lpfc_nvmet_rcv_ctx *ctxp;
  2326. struct lpfc_nvmet_tgtport *tgtp;
  2327. uint32_t status, result;
  2328. unsigned long flags;
  2329. bool released = false;
  2330. ctxp = cmdwqe->context2;
  2331. status = bf_get(lpfc_wcqe_c_status, wcqe);
  2332. result = wcqe->parameter;
  2333. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  2334. if (ctxp->flag & LPFC_NVMET_ABORT_OP)
  2335. atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
  2336. ctxp->state = LPFC_NVMET_STE_DONE;
  2337. /* Check if we already received a free context call
  2338. * and we have completed processing an abort situation.
  2339. */
  2340. spin_lock_irqsave(&ctxp->ctxlock, flags);
  2341. if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
  2342. !(ctxp->flag & LPFC_NVMET_XBUSY)) {
  2343. list_del(&ctxp->list);
  2344. released = true;
  2345. }
  2346. ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
  2347. spin_unlock_irqrestore(&ctxp->ctxlock, flags);
  2348. atomic_inc(&tgtp->xmt_abort_rsp);
  2349. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  2350. "6165 ABORT cmpl: xri x%x flg x%x (%d) "
  2351. "WCQE: %08x %08x %08x %08x\n",
  2352. ctxp->oxid, ctxp->flag, released,
  2353. wcqe->word0, wcqe->total_data_placed,
  2354. result, wcqe->word3);
  2355. cmdwqe->context2 = NULL;
  2356. cmdwqe->context3 = NULL;
  2357. /*
  2358. * If the transport has released the ctx, then we can reuse it now.
  2359. * Otherwise it will be recycled by the transport release call.
  2360. */
  2361. if (released)
  2362. lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
  2363. /* This is the iocbq for the abort, not the command */
  2364. lpfc_sli_release_iocbq(phba, cmdwqe);
  2365. /* Since iaab/iaar are NOT set, there is no work left.
  2366. * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
  2367. * should have been called already.
  2368. */
  2369. }
  2370. /**
  2371. * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
  2372. * @phba: Pointer to HBA context object.
  2373. * @cmdwqe: Pointer to driver command WQE object.
  2374. * @wcqe: Pointer to driver response CQE object.
  2375. *
  2376. * The function is called from SLI ring event handler with no
  2377. * lock held. This function is the completion handler for NVME ABTS for FCP cmds
  2378. * The function frees memory resources used for the NVME commands.
  2379. **/
  2380. static void
  2381. lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
  2382. struct lpfc_wcqe_complete *wcqe)
  2383. {
  2384. struct lpfc_nvmet_rcv_ctx *ctxp;
  2385. struct lpfc_nvmet_tgtport *tgtp;
  2386. unsigned long flags;
  2387. uint32_t status, result;
  2388. bool released = false;
  2389. ctxp = cmdwqe->context2;
  2390. status = bf_get(lpfc_wcqe_c_status, wcqe);
  2391. result = wcqe->parameter;
  2392. if (!ctxp) {
  2393. /* if context is clear, the related IO already completed */
  2394. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  2395. "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
  2396. wcqe->word0, wcqe->total_data_placed,
  2397. result, wcqe->word3);
  2398. return;
  2399. }
  2400. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  2401. if (ctxp->flag & LPFC_NVMET_ABORT_OP)
  2402. atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
  2403. /* Sanity check */
  2404. if (ctxp->state != LPFC_NVMET_STE_ABORT) {
  2405. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
  2406. "6112 ABTS Wrong state:%d oxid x%x\n",
  2407. ctxp->state, ctxp->oxid);
  2408. }
  2409. /* Check if we already received a free context call
  2410. * and we have completed processing an abort situation.
  2411. */
  2412. ctxp->state = LPFC_NVMET_STE_DONE;
  2413. spin_lock_irqsave(&ctxp->ctxlock, flags);
  2414. if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
  2415. !(ctxp->flag & LPFC_NVMET_XBUSY)) {
  2416. list_del(&ctxp->list);
  2417. released = true;
  2418. }
  2419. ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
  2420. spin_unlock_irqrestore(&ctxp->ctxlock, flags);
  2421. atomic_inc(&tgtp->xmt_abort_rsp);
  2422. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  2423. "6316 ABTS cmpl xri x%x flg x%x (%x) "
  2424. "WCQE: %08x %08x %08x %08x\n",
  2425. ctxp->oxid, ctxp->flag, released,
  2426. wcqe->word0, wcqe->total_data_placed,
  2427. result, wcqe->word3);
  2428. cmdwqe->context2 = NULL;
  2429. cmdwqe->context3 = NULL;
  2430. /*
  2431. * If the transport has released the ctx, then we can reuse it now.
  2432. * Otherwise it will be recycled by the transport release call.
  2433. */
  2434. if (released)
  2435. lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
  2436. /* Since iaab/iaar are NOT set, there is no work left.
  2437. * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
  2438. * should have been called already.
  2439. */
  2440. }
  2441. /**
  2442. * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
  2443. * @phba: Pointer to HBA context object.
  2444. * @cmdwqe: Pointer to driver command WQE object.
  2445. * @wcqe: Pointer to driver response CQE object.
  2446. *
  2447. * The function is called from SLI ring event handler with no
  2448. * lock held. This function is the completion handler for NVME ABTS for LS cmds
  2449. * The function frees memory resources used for the NVME commands.
  2450. **/
  2451. static void
  2452. lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
  2453. struct lpfc_wcqe_complete *wcqe)
  2454. {
  2455. struct lpfc_nvmet_rcv_ctx *ctxp;
  2456. struct lpfc_nvmet_tgtport *tgtp;
  2457. uint32_t status, result;
  2458. ctxp = cmdwqe->context2;
  2459. status = bf_get(lpfc_wcqe_c_status, wcqe);
  2460. result = wcqe->parameter;
  2461. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  2462. atomic_inc(&tgtp->xmt_ls_abort_cmpl);
  2463. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  2464. "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
  2465. ctxp, wcqe->word0, wcqe->total_data_placed,
  2466. result, wcqe->word3);
  2467. if (!ctxp) {
  2468. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
  2469. "6415 NVMET LS Abort No ctx: WCQE: "
  2470. "%08x %08x %08x %08x\n",
  2471. wcqe->word0, wcqe->total_data_placed,
  2472. result, wcqe->word3);
  2473. lpfc_sli_release_iocbq(phba, cmdwqe);
  2474. return;
  2475. }
  2476. if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
  2477. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
  2478. "6416 NVMET LS abort cmpl state mismatch: "
  2479. "oxid x%x: %d %d\n",
  2480. ctxp->oxid, ctxp->state, ctxp->entry_cnt);
  2481. }
  2482. cmdwqe->context2 = NULL;
  2483. cmdwqe->context3 = NULL;
  2484. lpfc_sli_release_iocbq(phba, cmdwqe);
  2485. kfree(ctxp);
  2486. }
  2487. static int
  2488. lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
  2489. struct lpfc_nvmet_rcv_ctx *ctxp,
  2490. uint32_t sid, uint16_t xri)
  2491. {
  2492. struct lpfc_nvmet_tgtport *tgtp;
  2493. struct lpfc_iocbq *abts_wqeq;
  2494. union lpfc_wqe128 *wqe_abts;
  2495. struct lpfc_nodelist *ndlp;
  2496. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  2497. "6067 ABTS: sid %x xri x%x/x%x\n",
  2498. sid, xri, ctxp->wqeq->sli4_xritag);
  2499. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  2500. ndlp = lpfc_findnode_did(phba->pport, sid);
  2501. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
  2502. ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
  2503. (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
  2504. atomic_inc(&tgtp->xmt_abort_rsp_error);
  2505. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
  2506. "6134 Drop ABTS - wrong NDLP state x%x.\n",
  2507. (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
  2508. /* No failure to an ABTS request. */
  2509. return 0;
  2510. }
  2511. abts_wqeq = ctxp->wqeq;
  2512. wqe_abts = &abts_wqeq->wqe;
  2513. /*
  2514. * Since we zero the whole WQE, we need to ensure we set the WQE fields
  2515. * that were initialized in lpfc_sli4_nvmet_alloc.
  2516. */
  2517. memset(wqe_abts, 0, sizeof(union lpfc_wqe));
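/* The abort is sent as a BA_ABTS basic link service frame carried by an
 * XMIT_SEQUENCE64 WQE; words 5 - 11 below build that frame.
 */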
  2518. /* Word 5 */
  2519. bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
  2520. bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
  2521. bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
  2522. bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
  2523. bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
  2524. /* Word 6 */
  2525. bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
  2526. phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  2527. bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
  2528. abts_wqeq->sli4_xritag);
  2529. /* Word 7 */
  2530. bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
  2531. CMD_XMIT_SEQUENCE64_WQE);
  2532. bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
  2533. bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
  2534. bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
  2535. /* Word 8 */
  2536. wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
  2537. /* Word 9 */
  2538. bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
  2539. /* Needs to be set by caller */
  2540. bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
  2541. /* Word 10 */
  2542. bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
  2543. bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
  2544. LPFC_WQE_LENLOC_WORD12);
  2545. bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
  2546. bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
  2547. /* Word 11 */
  2548. bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
  2549. LPFC_WQE_CQ_ID_DEFAULT);
  2550. bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
  2551. OTHER_COMMAND);
  2552. abts_wqeq->vport = phba->pport;
  2553. abts_wqeq->context1 = ndlp;
  2554. abts_wqeq->context2 = ctxp;
  2555. abts_wqeq->context3 = NULL;
  2556. abts_wqeq->rsvd2 = 0;
  2557. /* hba_wqidx should already be setup from command we are aborting */
  2558. abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
  2559. abts_wqeq->iocb.ulpLe = 1;
  2560. lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
  2561. "6069 Issue ABTS to xri x%x reqtag x%x\n",
  2562. xri, abts_wqeq->iotag);
  2563. return 1;
  2564. }
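
/* lpfc_nvmet_sol_fcp_issue_abort - abort a solicited NVMET FCP WQE.
 * Allocates an iocbq and issues an ABORT_XRI_CX WQE on the same WQ as the
 * command being aborted.  Returns 0 when the abort was issued or quietly
 * dropped (bad ndlp state, reset in progress, abort already outstanding);
 * returns 1 only if lpfc_sli4_issue_wqe() itself failed.
 */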
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
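
/* lpfc_nvmet_unsol_fcp_issue_abort - abort an unsolicited NVMET FCP
 * exchange.  Builds the BLS ABTS via lpfc_nvmet_unsol_issue_abort() and
 * issues it on the FCP ring.  On failure the receive context buffer is
 * posted back to the free pool and 1 is returned; 0 means the ABTS was
 * issued.
 */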
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		return 0;
	}

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
		list_del(&ctxp->list);
	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
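
/* lpfc_nvmet_unsol_ls_issue_abort - abort an unsolicited NVMET LS exchange.
 * Validates/advances the LS context state, builds the BLS ABTS and issues
 * it on the ELS ring.  On failure the iocbq and context are released.
 * Always returns 0 since an ABTS is never failed back to the caller.
 */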
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}