main.c 106 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762777277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212321332143215321632173218321932203221322232233224322532263227322832293230323132323233323432353236323732383239324032413242324332443245324632473248324932503251325232533254325532563257325832593260326132623263326432653266326732683269327032713272327332743275327632773278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * CXL Flash Device Driver
  4. *
  5. * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
  6. * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  7. *
  8. * Copyright (C) 2015 IBM Corporation
  9. */
  10. #include <linux/delay.h>
  11. #include <linux/list.h>
  12. #include <linux/module.h>
  13. #include <linux/pci.h>
  14. #include <linux/unaligned.h>
  15. #include <scsi/scsi_cmnd.h>
  16. #include <scsi/scsi_host.h>
  17. #include <uapi/scsi/cxlflash_ioctl.h>
  18. #include "main.h"
  19. #include "sislite.h"
  20. #include "common.h"
  21. MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
  22. MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
  23. MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
  24. MODULE_LICENSE("GPL");
  25. static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
  26. static const struct class cxlflash_class = {
  27. .name = "cxlflash",
  28. .devnode = cxlflash_devnode,
  29. };
  30. static u32 cxlflash_major;
  31. static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
  32. /**
  33. * process_cmd_err() - command error handler
  34. * @cmd: AFU command that experienced the error.
  35. * @scp: SCSI command associated with the AFU command in error.
  36. *
  37. * Translates error bits from AFU command to SCSI command results.
  38. */
  39. static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
  40. {
  41. struct afu *afu = cmd->parent;
  42. struct cxlflash_cfg *cfg = afu->parent;
  43. struct device *dev = &cfg->dev->dev;
  44. struct sisl_ioasa *ioasa;
  45. u32 resid;
  46. ioasa = &(cmd->sa);
  47. if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
  48. resid = ioasa->resid;
  49. scsi_set_resid(scp, resid);
  50. dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
  51. __func__, cmd, scp, resid);
  52. }
  53. if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
  54. dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n",
  55. __func__, cmd, scp);
  56. scp->result = (DID_ERROR << 16);
  57. }
  58. dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
  59. "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
  60. ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
  61. ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
  62. if (ioasa->rc.scsi_rc) {
  63. /* We have a SCSI status */
  64. if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
  65. memcpy(scp->sense_buffer, ioasa->sense_data,
  66. SISL_SENSE_DATA_LEN);
  67. scp->result = ioasa->rc.scsi_rc;
  68. } else
  69. scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
  70. }
  71. /*
  72. * We encountered an error. Set scp->result based on nature
  73. * of error.
  74. */
  75. if (ioasa->rc.fc_rc) {
  76. /* We have an FC status */
  77. switch (ioasa->rc.fc_rc) {
  78. case SISL_FC_RC_LINKDOWN:
  79. scp->result = (DID_REQUEUE << 16);
  80. break;
  81. case SISL_FC_RC_RESID:
  82. /* This indicates an FCP resid underrun */
  83. if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
  84. /* If the SISL_RC_FLAGS_OVERRUN flag was set,
  85. * then we will handle this error else where.
  86. * If not then we must handle it here.
  87. * This is probably an AFU bug.
  88. */
  89. scp->result = (DID_ERROR << 16);
  90. }
  91. break;
  92. case SISL_FC_RC_RESIDERR:
  93. /* Resid mismatch between adapter and device */
  94. case SISL_FC_RC_TGTABORT:
  95. case SISL_FC_RC_ABORTOK:
  96. case SISL_FC_RC_ABORTFAIL:
  97. case SISL_FC_RC_NOLOGI:
  98. case SISL_FC_RC_ABORTPEND:
  99. case SISL_FC_RC_WRABORTPEND:
  100. case SISL_FC_RC_NOEXP:
  101. case SISL_FC_RC_INUSE:
  102. scp->result = (DID_ERROR << 16);
  103. break;
  104. }
  105. }
  106. if (ioasa->rc.afu_rc) {
  107. /* We have an AFU error */
  108. switch (ioasa->rc.afu_rc) {
  109. case SISL_AFU_RC_NO_CHANNELS:
  110. scp->result = (DID_NO_CONNECT << 16);
  111. break;
  112. case SISL_AFU_RC_DATA_DMA_ERR:
  113. switch (ioasa->afu_extra) {
  114. case SISL_AFU_DMA_ERR_PAGE_IN:
  115. /* Retry */
  116. scp->result = (DID_IMM_RETRY << 16);
  117. break;
  118. case SISL_AFU_DMA_ERR_INVALID_EA:
  119. default:
  120. scp->result = (DID_ERROR << 16);
  121. }
  122. break;
  123. case SISL_AFU_RC_OUT_OF_DATA_BUFS:
  124. /* Retry */
  125. scp->result = (DID_ERROR << 16);
  126. break;
  127. default:
  128. scp->result = (DID_ERROR << 16);
  129. }
  130. }
  131. }
  132. /**
  133. * cmd_complete() - command completion handler
  134. * @cmd: AFU command that has completed.
  135. *
  136. * For SCSI commands this routine prepares and submits commands that have
  137. * either completed or timed out to the SCSI stack. For internal commands
  138. * (TMF or AFU), this routine simply notifies the originator that the
  139. * command has completed.
  140. */
  141. static void cmd_complete(struct afu_cmd *cmd)
  142. {
  143. struct scsi_cmnd *scp;
  144. ulong lock_flags;
  145. struct afu *afu = cmd->parent;
  146. struct cxlflash_cfg *cfg = afu->parent;
  147. struct device *dev = &cfg->dev->dev;
  148. struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
  149. spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
  150. list_del(&cmd->list);
  151. spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
  152. if (cmd->scp) {
  153. scp = cmd->scp;
  154. if (unlikely(cmd->sa.ioasc))
  155. process_cmd_err(cmd, scp);
  156. else
  157. scp->result = (DID_OK << 16);
  158. dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
  159. __func__, scp, scp->result, cmd->sa.ioasc);
  160. scsi_done(scp);
  161. } else if (cmd->cmd_tmf) {
  162. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  163. cfg->tmf_active = false;
  164. wake_up_all_locked(&cfg->tmf_waitq);
  165. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  166. } else
  167. complete(&cmd->cevent);
  168. }
  169. /**
  170. * flush_pending_cmds() - flush all pending commands on this hardware queue
  171. * @hwq: Hardware queue to flush.
  172. *
  173. * The hardware send queue lock associated with this hardware queue must be
  174. * held when calling this routine.
  175. */
  176. static void flush_pending_cmds(struct hwq *hwq)
  177. {
  178. struct cxlflash_cfg *cfg = hwq->afu->parent;
  179. struct afu_cmd *cmd, *tmp;
  180. struct scsi_cmnd *scp;
  181. ulong lock_flags;
  182. list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
  183. /* Bypass command when on a doneq, cmd_complete() will handle */
  184. if (!list_empty(&cmd->queue))
  185. continue;
  186. list_del(&cmd->list);
  187. if (cmd->scp) {
  188. scp = cmd->scp;
  189. scp->result = (DID_IMM_RETRY << 16);
  190. scsi_done(scp);
  191. } else {
  192. cmd->cmd_aborted = true;
  193. if (cmd->cmd_tmf) {
  194. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  195. cfg->tmf_active = false;
  196. wake_up_all_locked(&cfg->tmf_waitq);
  197. spin_unlock_irqrestore(&cfg->tmf_slock,
  198. lock_flags);
  199. } else
  200. complete(&cmd->cevent);
  201. }
  202. }
  203. }
  204. /**
  205. * context_reset() - reset context via specified register
  206. * @hwq: Hardware queue owning the context to be reset.
  207. * @reset_reg: MMIO register to perform reset.
  208. *
  209. * When the reset is successful, the SISLite specification guarantees that
  210. * the AFU has aborted all currently pending I/O. Accordingly, these commands
  211. * must be flushed.
  212. *
  213. * Return: 0 on success, -errno on failure
  214. */
  215. static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
  216. {
  217. struct cxlflash_cfg *cfg = hwq->afu->parent;
  218. struct device *dev = &cfg->dev->dev;
  219. int rc = -ETIMEDOUT;
  220. int nretry = 0;
  221. u64 val = 0x1;
  222. ulong lock_flags;
  223. dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
  224. spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
  225. writeq_be(val, reset_reg);
  226. do {
  227. val = readq_be(reset_reg);
  228. if ((val & 0x1) == 0x0) {
  229. rc = 0;
  230. break;
  231. }
  232. /* Double delay each time */
  233. udelay(1 << nretry);
  234. } while (nretry++ < MC_ROOM_RETRY_CNT);
  235. if (!rc)
  236. flush_pending_cmds(hwq);
  237. spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
  238. dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
  239. __func__, rc, val, nretry);
  240. return rc;
  241. }
  242. /**
  243. * context_reset_ioarrin() - reset context via IOARRIN register
  244. * @hwq: Hardware queue owning the context to be reset.
  245. *
  246. * Return: 0 on success, -errno on failure
  247. */
  248. static int context_reset_ioarrin(struct hwq *hwq)
  249. {
  250. return context_reset(hwq, &hwq->host_map->ioarrin);
  251. }
  252. /**
  253. * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
  254. * @hwq: Hardware queue owning the context to be reset.
  255. *
  256. * Return: 0 on success, -errno on failure
  257. */
  258. static int context_reset_sq(struct hwq *hwq)
  259. {
  260. return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
  261. }
  262. /**
  263. * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
  264. * @afu: AFU associated with the host.
  265. * @cmd: AFU command to send.
  266. *
  267. * Return:
  268. * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  269. */
  270. static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
  271. {
  272. struct cxlflash_cfg *cfg = afu->parent;
  273. struct device *dev = &cfg->dev->dev;
  274. struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
  275. int rc = 0;
  276. s64 room;
  277. ulong lock_flags;
  278. /*
  279. * To avoid the performance penalty of MMIO, spread the update of
  280. * 'room' over multiple commands.
  281. */
  282. spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
  283. if (--hwq->room < 0) {
  284. room = readq_be(&hwq->host_map->cmd_room);
  285. if (room <= 0) {
  286. dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
  287. "0x%02X, room=0x%016llX\n",
  288. __func__, cmd->rcb.cdb[0], room);
  289. hwq->room = 0;
  290. rc = SCSI_MLQUEUE_HOST_BUSY;
  291. goto out;
  292. }
  293. hwq->room = room - 1;
  294. }
  295. list_add(&cmd->list, &hwq->pending_cmds);
  296. writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
  297. out:
  298. spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
  299. dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
  300. __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
  301. return rc;
  302. }
  303. /**
  304. * send_cmd_sq() - sends an AFU command via SQ ring
  305. * @afu: AFU associated with the host.
  306. * @cmd: AFU command to send.
  307. *
  308. * Return:
  309. * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  310. */
  311. static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
  312. {
  313. struct cxlflash_cfg *cfg = afu->parent;
  314. struct device *dev = &cfg->dev->dev;
  315. struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
  316. int rc = 0;
  317. int newval;
  318. ulong lock_flags;
  319. newval = atomic_dec_if_positive(&hwq->hsq_credits);
  320. if (newval <= 0) {
  321. rc = SCSI_MLQUEUE_HOST_BUSY;
  322. goto out;
  323. }
  324. cmd->rcb.ioasa = &cmd->sa;
  325. spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
  326. *hwq->hsq_curr = cmd->rcb;
  327. if (hwq->hsq_curr < hwq->hsq_end)
  328. hwq->hsq_curr++;
  329. else
  330. hwq->hsq_curr = hwq->hsq_start;
  331. list_add(&cmd->list, &hwq->pending_cmds);
  332. writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
  333. spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
  334. out:
  335. dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
  336. "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
  337. cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
  338. readq_be(&hwq->host_map->sq_head),
  339. readq_be(&hwq->host_map->sq_tail));
  340. return rc;
  341. }
  342. /**
  343. * wait_resp() - polls for a response or timeout to a sent AFU command
  344. * @afu: AFU associated with the host.
  345. * @cmd: AFU command that was sent.
  346. *
  347. * Return: 0 on success, -errno on failure
  348. */
  349. static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
  350. {
  351. struct cxlflash_cfg *cfg = afu->parent;
  352. struct device *dev = &cfg->dev->dev;
  353. int rc = 0;
  354. ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
  355. timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
  356. if (!timeout)
  357. rc = -ETIMEDOUT;
  358. if (cmd->cmd_aborted)
  359. rc = -EAGAIN;
  360. if (unlikely(cmd->sa.ioasc != 0)) {
  361. dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
  362. __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
  363. rc = -EIO;
  364. }
  365. return rc;
  366. }
  367. /**
  368. * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
  369. * @host: SCSI host associated with device.
  370. * @scp: SCSI command to send.
  371. * @afu: SCSI command to send.
  372. *
  373. * Hashes a command based upon the hardware queue mode.
  374. *
  375. * Return: Trusted index of target hardware queue
  376. */
  377. static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
  378. struct afu *afu)
  379. {
  380. u32 tag;
  381. u32 hwq = 0;
  382. if (afu->num_hwqs == 1)
  383. return 0;
  384. switch (afu->hwq_mode) {
  385. case HWQ_MODE_RR:
  386. hwq = afu->hwq_rr_count++ % afu->num_hwqs;
  387. break;
  388. case HWQ_MODE_TAG:
  389. tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
  390. hwq = blk_mq_unique_tag_to_hwq(tag);
  391. break;
  392. case HWQ_MODE_CPU:
  393. hwq = smp_processor_id() % afu->num_hwqs;
  394. break;
  395. default:
  396. WARN_ON_ONCE(1);
  397. }
  398. return hwq;
  399. }
  400. /**
  401. * send_tmf() - sends a Task Management Function (TMF)
  402. * @cfg: Internal structure associated with the host.
  403. * @sdev: SCSI device destined for TMF.
  404. * @tmfcmd: TMF command to send.
  405. *
  406. * Return:
  407. * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
  408. */
  409. static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
  410. u64 tmfcmd)
  411. {
  412. struct afu *afu = cfg->afu;
  413. struct afu_cmd *cmd = NULL;
  414. struct device *dev = &cfg->dev->dev;
  415. struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
  416. bool needs_deletion = false;
  417. char *buf = NULL;
  418. ulong lock_flags;
  419. int rc = 0;
  420. ulong to;
  421. buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
  422. if (unlikely(!buf)) {
  423. dev_err(dev, "%s: no memory for command\n", __func__);
  424. rc = -ENOMEM;
  425. goto out;
  426. }
  427. cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
  428. INIT_LIST_HEAD(&cmd->queue);
  429. /* When Task Management Function is active do not send another */
  430. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  431. if (cfg->tmf_active)
  432. wait_event_interruptible_lock_irq(cfg->tmf_waitq,
  433. !cfg->tmf_active,
  434. cfg->tmf_slock);
  435. cfg->tmf_active = true;
  436. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  437. cmd->parent = afu;
  438. cmd->cmd_tmf = true;
  439. cmd->hwq_index = hwq->index;
  440. cmd->rcb.ctx_id = hwq->ctx_hndl;
  441. cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
  442. cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
  443. cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
  444. cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
  445. SISL_REQ_FLAGS_SUP_UNDERRUN |
  446. SISL_REQ_FLAGS_TMF_CMD);
  447. memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
  448. rc = afu->send_cmd(afu, cmd);
  449. if (unlikely(rc)) {
  450. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  451. cfg->tmf_active = false;
  452. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  453. goto out;
  454. }
  455. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  456. to = msecs_to_jiffies(5000);
  457. to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
  458. !cfg->tmf_active,
  459. cfg->tmf_slock,
  460. to);
  461. if (!to) {
  462. dev_err(dev, "%s: TMF timed out\n", __func__);
  463. rc = -ETIMEDOUT;
  464. needs_deletion = true;
  465. } else if (cmd->cmd_aborted) {
  466. dev_err(dev, "%s: TMF aborted\n", __func__);
  467. rc = -EAGAIN;
  468. } else if (cmd->sa.ioasc) {
  469. dev_err(dev, "%s: TMF failed ioasc=%08x\n",
  470. __func__, cmd->sa.ioasc);
  471. rc = -EIO;
  472. }
  473. cfg->tmf_active = false;
  474. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  475. if (needs_deletion) {
  476. spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
  477. list_del(&cmd->list);
  478. spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
  479. }
  480. out:
  481. kfree(buf);
  482. return rc;
  483. }
  484. /**
  485. * cxlflash_driver_info() - information handler for this host driver
  486. * @host: SCSI host associated with device.
  487. *
  488. * Return: A string describing the device.
  489. */
  490. static const char *cxlflash_driver_info(struct Scsi_Host *host)
  491. {
  492. return CXLFLASH_ADAPTER_NAME;
  493. }
  494. /**
  495. * cxlflash_queuecommand() - sends a mid-layer request
  496. * @host: SCSI host associated with device.
  497. * @scp: SCSI command to send.
  498. *
  499. * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  500. */
  501. static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
  502. {
  503. struct cxlflash_cfg *cfg = shost_priv(host);
  504. struct afu *afu = cfg->afu;
  505. struct device *dev = &cfg->dev->dev;
  506. struct afu_cmd *cmd = sc_to_afuci(scp);
  507. struct scatterlist *sg = scsi_sglist(scp);
  508. int hwq_index = cmd_to_target_hwq(host, scp, afu);
  509. struct hwq *hwq = get_hwq(afu, hwq_index);
  510. u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
  511. ulong lock_flags;
  512. int rc = 0;
  513. dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
  514. "cdb=(%08x-%08x-%08x-%08x)\n",
  515. __func__, scp, host->host_no, scp->device->channel,
  516. scp->device->id, scp->device->lun,
  517. get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
  518. get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
  519. get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
  520. get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
  521. /*
  522. * If a Task Management Function is active, wait for it to complete
  523. * before continuing with regular commands.
  524. */
  525. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  526. if (cfg->tmf_active) {
  527. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  528. rc = SCSI_MLQUEUE_HOST_BUSY;
  529. goto out;
  530. }
  531. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  532. switch (cfg->state) {
  533. case STATE_PROBING:
  534. case STATE_PROBED:
  535. case STATE_RESET:
  536. dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
  537. rc = SCSI_MLQUEUE_HOST_BUSY;
  538. goto out;
  539. case STATE_FAILTERM:
  540. dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
  541. scp->result = (DID_NO_CONNECT << 16);
  542. scsi_done(scp);
  543. rc = 0;
  544. goto out;
  545. default:
  546. atomic_inc(&afu->cmds_active);
  547. break;
  548. }
  549. if (likely(sg)) {
  550. cmd->rcb.data_len = sg->length;
  551. cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
  552. }
  553. cmd->scp = scp;
  554. cmd->parent = afu;
  555. cmd->hwq_index = hwq_index;
  556. cmd->sa.ioasc = 0;
  557. cmd->rcb.ctx_id = hwq->ctx_hndl;
  558. cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
  559. cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
  560. cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
  561. if (scp->sc_data_direction == DMA_TO_DEVICE)
  562. req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
  563. cmd->rcb.req_flags = req_flags;
  564. memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
  565. rc = afu->send_cmd(afu, cmd);
  566. atomic_dec(&afu->cmds_active);
  567. out:
  568. return rc;
  569. }
  570. /**
  571. * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
  572. * @cfg: Internal structure associated with the host.
  573. */
  574. static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
  575. {
  576. struct pci_dev *pdev = cfg->dev;
  577. if (pci_channel_offline(pdev))
  578. wait_event_timeout(cfg->reset_waitq,
  579. !pci_channel_offline(pdev),
  580. CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
  581. }
  582. /**
  583. * free_mem() - free memory associated with the AFU
  584. * @cfg: Internal structure associated with the host.
  585. */
  586. static void free_mem(struct cxlflash_cfg *cfg)
  587. {
  588. struct afu *afu = cfg->afu;
  589. if (cfg->afu) {
  590. free_pages((ulong)afu, get_order(sizeof(struct afu)));
  591. cfg->afu = NULL;
  592. }
  593. }
  594. /**
  595. * cxlflash_reset_sync() - synchronizing point for asynchronous resets
  596. * @cfg: Internal structure associated with the host.
  597. */
  598. static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
  599. {
  600. if (cfg->async_reset_cookie == 0)
  601. return;
  602. /* Wait until all async calls prior to this cookie have completed */
  603. async_synchronize_cookie(cfg->async_reset_cookie + 1);
  604. cfg->async_reset_cookie = 0;
  605. }
  606. /**
  607. * stop_afu() - stops the AFU command timers and unmaps the MMIO space
  608. * @cfg: Internal structure associated with the host.
  609. *
  610. * Safe to call with AFU in a partially allocated/initialized state.
  611. *
  612. * Cancels scheduled worker threads, waits for any active internal AFU
  613. * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
  614. */
  615. static void stop_afu(struct cxlflash_cfg *cfg)
  616. {
  617. struct afu *afu = cfg->afu;
  618. struct hwq *hwq;
  619. int i;
  620. cancel_work_sync(&cfg->work_q);
  621. if (!current_is_async())
  622. cxlflash_reset_sync(cfg);
  623. if (likely(afu)) {
  624. while (atomic_read(&afu->cmds_active))
  625. ssleep(1);
  626. if (afu_is_irqpoll_enabled(afu)) {
  627. for (i = 0; i < afu->num_hwqs; i++) {
  628. hwq = get_hwq(afu, i);
  629. irq_poll_disable(&hwq->irqpoll);
  630. }
  631. }
  632. if (likely(afu->afu_map)) {
  633. cfg->ops->psa_unmap(afu->afu_map);
  634. afu->afu_map = NULL;
  635. }
  636. }
  637. }
  638. /**
  639. * term_intr() - disables all AFU interrupts
  640. * @cfg: Internal structure associated with the host.
  641. * @level: Depth of allocation, where to begin waterfall tear down.
  642. * @index: Index of the hardware queue.
  643. *
  644. * Safe to call with AFU/MC in partially allocated/initialized state.
  645. */
  646. static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
  647. u32 index)
  648. {
  649. struct afu *afu = cfg->afu;
  650. struct device *dev = &cfg->dev->dev;
  651. struct hwq *hwq;
  652. if (!afu) {
  653. dev_err(dev, "%s: returning with NULL afu\n", __func__);
  654. return;
  655. }
  656. hwq = get_hwq(afu, index);
  657. if (!hwq->ctx_cookie) {
  658. dev_err(dev, "%s: returning with NULL MC\n", __func__);
  659. return;
  660. }
  661. switch (level) {
  662. case UNMAP_THREE:
  663. /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
  664. if (index == PRIMARY_HWQ)
  665. cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
  666. fallthrough;
  667. case UNMAP_TWO:
  668. cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
  669. fallthrough;
  670. case UNMAP_ONE:
  671. cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
  672. fallthrough;
  673. case FREE_IRQ:
  674. cfg->ops->free_afu_irqs(hwq->ctx_cookie);
  675. fallthrough;
  676. case UNDO_NOOP:
  677. /* No action required */
  678. break;
  679. }
  680. }
  681. /**
  682. * term_mc() - terminates the master context
  683. * @cfg: Internal structure associated with the host.
  684. * @index: Index of the hardware queue.
  685. *
  686. * Safe to call with AFU/MC in partially allocated/initialized state.
  687. */
  688. static void term_mc(struct cxlflash_cfg *cfg, u32 index)
  689. {
  690. struct afu *afu = cfg->afu;
  691. struct device *dev = &cfg->dev->dev;
  692. struct hwq *hwq;
  693. ulong lock_flags;
  694. if (!afu) {
  695. dev_err(dev, "%s: returning with NULL afu\n", __func__);
  696. return;
  697. }
  698. hwq = get_hwq(afu, index);
  699. if (!hwq->ctx_cookie) {
  700. dev_err(dev, "%s: returning with NULL MC\n", __func__);
  701. return;
  702. }
  703. WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
  704. if (index != PRIMARY_HWQ)
  705. WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
  706. hwq->ctx_cookie = NULL;
  707. spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
  708. hwq->hrrq_online = false;
  709. spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
  710. spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
  711. flush_pending_cmds(hwq);
  712. spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
  713. }
  714. /**
  715. * term_afu() - terminates the AFU
  716. * @cfg: Internal structure associated with the host.
  717. *
  718. * Safe to call with AFU/MC in partially allocated/initialized state.
  719. */
  720. static void term_afu(struct cxlflash_cfg *cfg)
  721. {
  722. struct device *dev = &cfg->dev->dev;
  723. int k;
  724. /*
  725. * Tear down is carefully orchestrated to ensure
  726. * no interrupts can come in when the problem state
  727. * area is unmapped.
  728. *
  729. * 1) Disable all AFU interrupts for each master
  730. * 2) Unmap the problem state area
  731. * 3) Stop each master context
  732. */
  733. for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
  734. term_intr(cfg, UNMAP_THREE, k);
  735. stop_afu(cfg);
  736. for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
  737. term_mc(cfg, k);
  738. dev_dbg(dev, "%s: returning\n", __func__);
  739. }
  740. /**
  741. * notify_shutdown() - notifies device of pending shutdown
  742. * @cfg: Internal structure associated with the host.
  743. * @wait: Whether to wait for shutdown processing to complete.
  744. *
  745. * This function will notify the AFU that the adapter is being shutdown
  746. * and will wait for shutdown processing to complete if wait is true.
  747. * This notification should flush pending I/Os to the device and halt
  748. * further I/Os until the next AFU reset is issued and device restarted.
  749. */
  750. static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
  751. {
  752. struct afu *afu = cfg->afu;
  753. struct device *dev = &cfg->dev->dev;
  754. struct dev_dependent_vals *ddv;
  755. __be64 __iomem *fc_port_regs;
  756. u64 reg, status;
  757. int i, retry_cnt = 0;
  758. ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
  759. if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
  760. return;
  761. if (!afu || !afu->afu_map) {
  762. dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
  763. return;
  764. }
  765. /* Notify AFU */
  766. for (i = 0; i < cfg->num_fc_ports; i++) {
  767. fc_port_regs = get_fc_port_regs(cfg, i);
  768. reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
  769. reg |= SISL_FC_SHUTDOWN_NORMAL;
  770. writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
  771. }
  772. if (!wait)
  773. return;
  774. /* Wait up to 1.5 seconds for shutdown processing to complete */
  775. for (i = 0; i < cfg->num_fc_ports; i++) {
  776. fc_port_regs = get_fc_port_regs(cfg, i);
  777. retry_cnt = 0;
  778. while (true) {
  779. status = readq_be(&fc_port_regs[FC_STATUS / 8]);
  780. if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
  781. break;
  782. if (++retry_cnt >= MC_RETRY_CNT) {
  783. dev_dbg(dev, "%s: port %d shutdown processing "
  784. "not yet completed\n", __func__, i);
  785. break;
  786. }
  787. msleep(100 * retry_cnt);
  788. }
  789. }
  790. }
  791. /**
  792. * cxlflash_get_minor() - gets the first available minor number
  793. *
  794. * Return: Unique minor number that can be used to create the character device.
  795. */
  796. static int cxlflash_get_minor(void)
  797. {
  798. int minor;
  799. long bit;
  800. bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
  801. if (bit >= CXLFLASH_MAX_ADAPTERS)
  802. return -1;
  803. minor = bit & MINORMASK;
  804. set_bit(minor, cxlflash_minor);
  805. return minor;
  806. }
  807. /**
  808. * cxlflash_put_minor() - releases the minor number
  809. * @minor: Minor number that is no longer needed.
  810. */
  811. static void cxlflash_put_minor(int minor)
  812. {
  813. clear_bit(minor, cxlflash_minor);
  814. }
  815. /**
  816. * cxlflash_release_chrdev() - release the character device for the host
  817. * @cfg: Internal structure associated with the host.
  818. */
  819. static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
  820. {
  821. device_unregister(cfg->chardev);
  822. cfg->chardev = NULL;
  823. cdev_del(&cfg->cdev);
  824. cxlflash_put_minor(MINOR(cfg->cdev.dev));
  825. }
  826. /**
  827. * cxlflash_remove() - PCI entry point to tear down host
  828. * @pdev: PCI device associated with the host.
  829. *
  830. * Safe to use as a cleanup in partially allocated/initialized state. Note that
  831. * the reset_waitq is flushed as part of the stop/termination of user contexts.
  832. */
  833. static void cxlflash_remove(struct pci_dev *pdev)
  834. {
  835. struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
  836. struct device *dev = &pdev->dev;
  837. ulong lock_flags;
  838. if (!pci_is_enabled(pdev)) {
  839. dev_dbg(dev, "%s: Device is disabled\n", __func__);
  840. return;
  841. }
  842. /* Yield to running recovery threads before continuing with remove */
  843. wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
  844. cfg->state != STATE_PROBING);
  845. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  846. if (cfg->tmf_active)
  847. wait_event_interruptible_lock_irq(cfg->tmf_waitq,
  848. !cfg->tmf_active,
  849. cfg->tmf_slock);
  850. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  851. /* Notify AFU and wait for shutdown processing to complete */
  852. notify_shutdown(cfg, true);
  853. cfg->state = STATE_FAILTERM;
  854. cxlflash_stop_term_user_contexts(cfg);
  855. switch (cfg->init_state) {
  856. case INIT_STATE_CDEV:
  857. cxlflash_release_chrdev(cfg);
  858. fallthrough;
  859. case INIT_STATE_SCSI:
  860. cxlflash_term_local_luns(cfg);
  861. scsi_remove_host(cfg->host);
  862. fallthrough;
  863. case INIT_STATE_AFU:
  864. term_afu(cfg);
  865. fallthrough;
  866. case INIT_STATE_PCI:
  867. cfg->ops->destroy_afu(cfg->afu_cookie);
  868. pci_disable_device(pdev);
  869. fallthrough;
  870. case INIT_STATE_NONE:
  871. free_mem(cfg);
  872. scsi_host_put(cfg->host);
  873. break;
  874. }
  875. dev_dbg(dev, "%s: returning\n", __func__);
  876. }
  877. /**
  878. * alloc_mem() - allocates the AFU and its command pool
  879. * @cfg: Internal structure associated with the host.
  880. *
  881. * A partially allocated state remains on failure.
  882. *
  883. * Return:
  884. * 0 on success
  885. * -ENOMEM on failure to allocate memory
  886. */
  887. static int alloc_mem(struct cxlflash_cfg *cfg)
  888. {
  889. int rc = 0;
  890. struct device *dev = &cfg->dev->dev;
  891. /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
  892. cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
  893. get_order(sizeof(struct afu)));
  894. if (unlikely(!cfg->afu)) {
  895. dev_err(dev, "%s: cannot get %d free pages\n",
  896. __func__, get_order(sizeof(struct afu)));
  897. rc = -ENOMEM;
  898. goto out;
  899. }
  900. cfg->afu->parent = cfg;
  901. cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
  902. cfg->afu->afu_map = NULL;
  903. out:
  904. return rc;
  905. }
  906. /**
  907. * init_pci() - initializes the host as a PCI device
  908. * @cfg: Internal structure associated with the host.
  909. *
  910. * Return: 0 on success, -errno on failure
  911. */
  912. static int init_pci(struct cxlflash_cfg *cfg)
  913. {
  914. struct pci_dev *pdev = cfg->dev;
  915. struct device *dev = &cfg->dev->dev;
  916. int rc = 0;
  917. rc = pci_enable_device(pdev);
  918. if (rc || pci_channel_offline(pdev)) {
  919. if (pci_channel_offline(pdev)) {
  920. cxlflash_wait_for_pci_err_recovery(cfg);
  921. rc = pci_enable_device(pdev);
  922. }
  923. if (rc) {
  924. dev_err(dev, "%s: Cannot enable adapter\n", __func__);
  925. cxlflash_wait_for_pci_err_recovery(cfg);
  926. goto out;
  927. }
  928. }
  929. out:
  930. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  931. return rc;
  932. }
  933. /**
  934. * init_scsi() - adds the host to the SCSI stack and kicks off host scan
  935. * @cfg: Internal structure associated with the host.
  936. *
  937. * Return: 0 on success, -errno on failure
  938. */
  939. static int init_scsi(struct cxlflash_cfg *cfg)
  940. {
  941. struct pci_dev *pdev = cfg->dev;
  942. struct device *dev = &cfg->dev->dev;
  943. int rc = 0;
  944. rc = scsi_add_host(cfg->host, &pdev->dev);
  945. if (rc) {
  946. dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
  947. goto out;
  948. }
  949. scsi_scan_host(cfg->host);
  950. out:
  951. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  952. return rc;
  953. }
  954. /**
  955. * set_port_online() - transitions the specified host FC port to online state
  956. * @fc_regs: Top of MMIO region defined for specified port.
  957. *
  958. * The provided MMIO region must be mapped prior to call. Online state means
  959. * that the FC link layer has synced, completed the handshaking process, and
  960. * is ready for login to start.
  961. */
  962. static void set_port_online(__be64 __iomem *fc_regs)
  963. {
  964. u64 cmdcfg;
  965. cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
  966. cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
  967. cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
  968. writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
  969. }
  970. /**
  971. * set_port_offline() - transitions the specified host FC port to offline state
  972. * @fc_regs: Top of MMIO region defined for specified port.
  973. *
  974. * The provided MMIO region must be mapped prior to call.
  975. */
  976. static void set_port_offline(__be64 __iomem *fc_regs)
  977. {
  978. u64 cmdcfg;
  979. cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
  980. cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
  981. cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
  982. writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
  983. }
  984. /**
  985. * wait_port_online() - waits for the specified host FC port come online
  986. * @fc_regs: Top of MMIO region defined for specified port.
  987. * @delay_us: Number of microseconds to delay between reading port status.
  988. * @nretry: Number of cycles to retry reading port status.
  989. *
  990. * The provided MMIO region must be mapped prior to call. This will timeout
  991. * when the cable is not plugged in.
  992. *
  993. * Return:
  994. * TRUE (1) when the specified port is online
  995. * FALSE (0) when the specified port fails to come online after timeout
  996. */
  997. static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
  998. {
  999. u64 status;
  1000. WARN_ON(delay_us < 1000);
  1001. do {
  1002. msleep(delay_us / 1000);
  1003. status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
  1004. if (status == U64_MAX)
  1005. nretry /= 2;
  1006. } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
  1007. nretry--);
  1008. return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
  1009. }
  1010. /**
  1011. * wait_port_offline() - waits for the specified host FC port go offline
  1012. * @fc_regs: Top of MMIO region defined for specified port.
  1013. * @delay_us: Number of microseconds to delay between reading port status.
  1014. * @nretry: Number of cycles to retry reading port status.
  1015. *
  1016. * The provided MMIO region must be mapped prior to call.
  1017. *
  1018. * Return:
  1019. * TRUE (1) when the specified port is offline
  1020. * FALSE (0) when the specified port fails to go offline after timeout
  1021. */
  1022. static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
  1023. {
  1024. u64 status;
  1025. WARN_ON(delay_us < 1000);
  1026. do {
  1027. msleep(delay_us / 1000);
  1028. status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
  1029. if (status == U64_MAX)
  1030. nretry /= 2;
  1031. } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
  1032. nretry--);
  1033. return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
  1034. }
  1035. /**
  1036. * afu_set_wwpn() - configures the WWPN for the specified host FC port
  1037. * @afu: AFU associated with the host that owns the specified FC port.
  1038. * @port: Port number being configured.
  1039. * @fc_regs: Top of MMIO region defined for specified port.
  1040. * @wwpn: The world-wide-port-number previously discovered for port.
  1041. *
  1042. * The provided MMIO region must be mapped prior to call. As part of the
  1043. * sequence to configure the WWPN, the port is toggled offline and then back
  1044. * online. This toggling action can cause this routine to delay up to a few
  1045. * seconds. When configured to use the internal LUN feature of the AFU, a
  1046. * failure to come online is overridden.
  1047. */
  1048. static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
  1049. u64 wwpn)
  1050. {
  1051. struct cxlflash_cfg *cfg = afu->parent;
  1052. struct device *dev = &cfg->dev->dev;
  1053. set_port_offline(fc_regs);
  1054. if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  1055. FC_PORT_STATUS_RETRY_CNT)) {
  1056. dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
  1057. __func__, port);
  1058. }
  1059. writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
  1060. set_port_online(fc_regs);
  1061. if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  1062. FC_PORT_STATUS_RETRY_CNT)) {
  1063. dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
  1064. __func__, port);
  1065. }
  1066. }
  1067. /**
  1068. * afu_link_reset() - resets the specified host FC port
  1069. * @afu: AFU associated with the host that owns the specified FC port.
  1070. * @port: Port number being configured.
  1071. * @fc_regs: Top of MMIO region defined for specified port.
  1072. *
  1073. * The provided MMIO region must be mapped prior to call. The sequence to
  1074. * reset the port involves toggling it offline and then back online. This
  1075. * action can cause this routine to delay up to a few seconds. An effort
  1076. * is made to maintain link with the device by switching to host to use
  1077. * the alternate port exclusively while the reset takes place.
  1078. * failure to come online is overridden.
  1079. */
  1080. static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
  1081. {
  1082. struct cxlflash_cfg *cfg = afu->parent;
  1083. struct device *dev = &cfg->dev->dev;
  1084. u64 port_sel;
  1085. /* first switch the AFU to the other links, if any */
  1086. port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
  1087. port_sel &= ~(1ULL << port);
  1088. writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
  1089. cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
  1090. set_port_offline(fc_regs);
  1091. if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  1092. FC_PORT_STATUS_RETRY_CNT))
  1093. dev_err(dev, "%s: wait on port %d to go offline timed out\n",
  1094. __func__, port);
  1095. set_port_online(fc_regs);
  1096. if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  1097. FC_PORT_STATUS_RETRY_CNT))
  1098. dev_err(dev, "%s: wait on port %d to go online timed out\n",
  1099. __func__, port);
  1100. /* switch back to include this port */
  1101. port_sel |= (1ULL << port);
  1102. writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
  1103. cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
  1104. dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
  1105. }
  1106. /**
  1107. * afu_err_intr_init() - clears and initializes the AFU for error interrupts
  1108. * @afu: AFU associated with the host.
  1109. */
  1110. static void afu_err_intr_init(struct afu *afu)
  1111. {
  1112. struct cxlflash_cfg *cfg = afu->parent;
  1113. __be64 __iomem *fc_port_regs;
  1114. int i;
  1115. struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
  1116. u64 reg;
  1117. /* global async interrupts: AFU clears afu_ctrl on context exit
  1118. * if async interrupts were sent to that context. This prevents
  1119. * the AFU form sending further async interrupts when
  1120. * there is
  1121. * nobody to receive them.
  1122. */
  1123. /* mask all */
  1124. writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
  1125. /* set LISN# to send and point to primary master context */
  1126. reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
  1127. if (afu->internal_lun)
  1128. reg |= 1; /* Bit 63 indicates local lun */
  1129. writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
  1130. /* clear all */
  1131. writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
  1132. /* unmask bits that are of interest */
  1133. /* note: afu can send an interrupt after this step */
  1134. writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
  1135. /* clear again in case a bit came on after previous clear but before */
  1136. /* unmask */
  1137. writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
  1138. /* Clear/Set internal lun bits */
  1139. fc_port_regs = get_fc_port_regs(cfg, 0);
  1140. reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
  1141. reg &= SISL_FC_INTERNAL_MASK;
  1142. if (afu->internal_lun)
  1143. reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
  1144. writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
  1145. /* now clear FC errors */
  1146. for (i = 0; i < cfg->num_fc_ports; i++) {
  1147. fc_port_regs = get_fc_port_regs(cfg, i);
  1148. writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
  1149. writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
  1150. }
  1151. /* sync interrupts for master's IOARRIN write */
  1152. /* note that unlike asyncs, there can be no pending sync interrupts */
  1153. /* at this time (this is a fresh context and master has not written */
  1154. /* IOARRIN yet), so there is nothing to clear. */
  1155. /* set LISN#, it is always sent to the context that wrote IOARRIN */
  1156. for (i = 0; i < afu->num_hwqs; i++) {
  1157. hwq = get_hwq(afu, i);
  1158. reg = readq_be(&hwq->host_map->ctx_ctrl);
  1159. WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
  1160. reg |= SISL_MSI_SYNC_ERROR;
  1161. writeq_be(reg, &hwq->host_map->ctx_ctrl);
  1162. writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
  1163. }
  1164. }
  1165. /**
  1166. * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
  1167. * @irq: Interrupt number.
  1168. * @data: Private data provided at interrupt registration, the AFU.
  1169. *
  1170. * Return: Always return IRQ_HANDLED.
  1171. */
  1172. static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
  1173. {
  1174. struct hwq *hwq = (struct hwq *)data;
  1175. struct cxlflash_cfg *cfg = hwq->afu->parent;
  1176. struct device *dev = &cfg->dev->dev;
  1177. u64 reg;
  1178. u64 reg_unmasked;
  1179. reg = readq_be(&hwq->host_map->intr_status);
  1180. reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
  1181. if (reg_unmasked == 0UL) {
  1182. dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
  1183. __func__, reg);
  1184. goto cxlflash_sync_err_irq_exit;
  1185. }
  1186. dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
  1187. __func__, reg);
  1188. writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
  1189. cxlflash_sync_err_irq_exit:
  1190. return IRQ_HANDLED;
  1191. }
  1192. /**
  1193. * process_hrrq() - process the read-response queue
  1194. * @hwq: HWQ associated with the host.
  1195. * @doneq: Queue of commands harvested from the RRQ.
  1196. * @budget: Threshold of RRQ entries to process.
  1197. *
  1198. * This routine must be called holding the disabled RRQ spin lock.
  1199. *
  1200. * Return: The number of entries processed.
  1201. */
  1202. static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
  1203. {
  1204. struct afu *afu = hwq->afu;
  1205. struct afu_cmd *cmd;
  1206. struct sisl_ioasa *ioasa;
  1207. struct sisl_ioarcb *ioarcb;
  1208. bool toggle = hwq->toggle;
  1209. int num_hrrq = 0;
  1210. u64 entry,
  1211. *hrrq_start = hwq->hrrq_start,
  1212. *hrrq_end = hwq->hrrq_end,
  1213. *hrrq_curr = hwq->hrrq_curr;
  1214. /* Process ready RRQ entries up to the specified budget (if any) */
  1215. while (true) {
  1216. entry = *hrrq_curr;
  1217. if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
  1218. break;
  1219. entry &= ~SISL_RESP_HANDLE_T_BIT;
  1220. if (afu_is_sq_cmd_mode(afu)) {
  1221. ioasa = (struct sisl_ioasa *)entry;
  1222. cmd = container_of(ioasa, struct afu_cmd, sa);
  1223. } else {
  1224. ioarcb = (struct sisl_ioarcb *)entry;
  1225. cmd = container_of(ioarcb, struct afu_cmd, rcb);
  1226. }
  1227. list_add_tail(&cmd->queue, doneq);
  1228. /* Advance to next entry or wrap and flip the toggle bit */
  1229. if (hrrq_curr < hrrq_end)
  1230. hrrq_curr++;
  1231. else {
  1232. hrrq_curr = hrrq_start;
  1233. toggle ^= SISL_RESP_HANDLE_T_BIT;
  1234. }
  1235. atomic_inc(&hwq->hsq_credits);
  1236. num_hrrq++;
  1237. if (budget > 0 && num_hrrq >= budget)
  1238. break;
  1239. }
  1240. hwq->hrrq_curr = hrrq_curr;
  1241. hwq->toggle = toggle;
  1242. return num_hrrq;
  1243. }
  1244. /**
  1245. * process_cmd_doneq() - process a queue of harvested RRQ commands
  1246. * @doneq: Queue of completed commands.
  1247. *
  1248. * Note that upon return the queue can no longer be trusted.
  1249. */
  1250. static void process_cmd_doneq(struct list_head *doneq)
  1251. {
  1252. struct afu_cmd *cmd, *tmp;
  1253. WARN_ON(list_empty(doneq));
  1254. list_for_each_entry_safe(cmd, tmp, doneq, queue)
  1255. cmd_complete(cmd);
  1256. }
  1257. /**
  1258. * cxlflash_irqpoll() - process a queue of harvested RRQ commands
  1259. * @irqpoll: IRQ poll structure associated with queue to poll.
  1260. * @budget: Threshold of RRQ entries to process per poll.
  1261. *
  1262. * Return: The number of entries processed.
  1263. */
  1264. static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
  1265. {
  1266. struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
  1267. unsigned long hrrq_flags;
  1268. LIST_HEAD(doneq);
  1269. int num_entries = 0;
  1270. spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
  1271. num_entries = process_hrrq(hwq, &doneq, budget);
  1272. if (num_entries < budget)
  1273. irq_poll_complete(irqpoll);
  1274. spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
  1275. process_cmd_doneq(&doneq);
  1276. return num_entries;
  1277. }
  1278. /**
  1279. * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
  1280. * @irq: Interrupt number.
  1281. * @data: Private data provided at interrupt registration, the AFU.
  1282. *
  1283. * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
  1284. */
  1285. static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
  1286. {
  1287. struct hwq *hwq = (struct hwq *)data;
  1288. struct afu *afu = hwq->afu;
  1289. unsigned long hrrq_flags;
  1290. LIST_HEAD(doneq);
  1291. int num_entries = 0;
  1292. spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
  1293. /* Silently drop spurious interrupts when queue is not online */
  1294. if (!hwq->hrrq_online) {
  1295. spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
  1296. return IRQ_HANDLED;
  1297. }
  1298. if (afu_is_irqpoll_enabled(afu)) {
  1299. irq_poll_sched(&hwq->irqpoll);
  1300. spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
  1301. return IRQ_HANDLED;
  1302. }
  1303. num_entries = process_hrrq(hwq, &doneq, -1);
  1304. spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
  1305. if (num_entries == 0)
  1306. return IRQ_NONE;
  1307. process_cmd_doneq(&doneq);
  1308. return IRQ_HANDLED;
  1309. }
  1310. /*
  1311. * Asynchronous interrupt information table
  1312. *
  1313. * NOTE:
  1314. * - Order matters here as this array is indexed by bit position.
  1315. *
  1316. * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
  1317. * as complex and complains due to a lack of parentheses/braces.
  1318. */
  1319. #define ASTATUS_FC(_a, _b, _c, _d) \
  1320. { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
  1321. #define BUILD_SISL_ASTATUS_FC_PORT(_a) \
  1322. ASTATUS_FC(_a, LINK_UP, "link up", 0), \
  1323. ASTATUS_FC(_a, LINK_DN, "link down", 0), \
  1324. ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
  1325. ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
  1326. ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
  1327. ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
  1328. ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
  1329. ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
  1330. static const struct asyc_intr_info ainfo[] = {
  1331. BUILD_SISL_ASTATUS_FC_PORT(1),
  1332. BUILD_SISL_ASTATUS_FC_PORT(0),
  1333. BUILD_SISL_ASTATUS_FC_PORT(3),
  1334. BUILD_SISL_ASTATUS_FC_PORT(2)
  1335. };
  1336. /**
  1337. * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
  1338. * @irq: Interrupt number.
  1339. * @data: Private data provided at interrupt registration, the AFU.
  1340. *
  1341. * Return: Always return IRQ_HANDLED.
  1342. */
  1343. static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
  1344. {
  1345. struct hwq *hwq = (struct hwq *)data;
  1346. struct afu *afu = hwq->afu;
  1347. struct cxlflash_cfg *cfg = afu->parent;
  1348. struct device *dev = &cfg->dev->dev;
  1349. const struct asyc_intr_info *info;
  1350. struct sisl_global_map __iomem *global = &afu->afu_map->global;
  1351. __be64 __iomem *fc_port_regs;
  1352. u64 reg_unmasked;
  1353. u64 reg;
  1354. u64 bit;
  1355. u8 port;
  1356. reg = readq_be(&global->regs.aintr_status);
  1357. reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
  1358. if (unlikely(reg_unmasked == 0)) {
  1359. dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
  1360. __func__, reg);
  1361. goto out;
  1362. }
  1363. /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
  1364. writeq_be(reg_unmasked, &global->regs.aintr_clear);
  1365. /* Check each bit that is on */
  1366. for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
  1367. if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
  1368. WARN_ON_ONCE(1);
  1369. continue;
  1370. }
  1371. info = &ainfo[bit];
  1372. if (unlikely(info->status != 1ULL << bit)) {
  1373. WARN_ON_ONCE(1);
  1374. continue;
  1375. }
  1376. port = info->port;
  1377. fc_port_regs = get_fc_port_regs(cfg, port);
  1378. dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
  1379. __func__, port, info->desc,
  1380. readq_be(&fc_port_regs[FC_STATUS / 8]));
  1381. /*
  1382. * Do link reset first, some OTHER errors will set FC_ERROR
  1383. * again if cleared before or w/o a reset
  1384. */
  1385. if (info->action & LINK_RESET) {
  1386. dev_err(dev, "%s: FC Port %d: resetting link\n",
  1387. __func__, port);
  1388. cfg->lr_state = LINK_RESET_REQUIRED;
  1389. cfg->lr_port = port;
  1390. schedule_work(&cfg->work_q);
  1391. }
  1392. if (info->action & CLR_FC_ERROR) {
  1393. reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
  1394. /*
  1395. * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
  1396. * should be the same and tracing one is sufficient.
  1397. */
  1398. dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
  1399. __func__, port, reg);
  1400. writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
  1401. writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
  1402. }
  1403. if (info->action & SCAN_HOST) {
  1404. atomic_inc(&cfg->scan_host_needed);
  1405. schedule_work(&cfg->work_q);
  1406. }
  1407. }
  1408. out:
  1409. return IRQ_HANDLED;
  1410. }
  1411. /**
  1412. * read_vpd() - obtains the WWPNs from VPD
  1413. * @cfg: Internal structure associated with the host.
  1414. * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
  1415. *
  1416. * Return: 0 on success, -errno on failure
  1417. */
  1418. static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
  1419. {
  1420. struct device *dev = &cfg->dev->dev;
  1421. struct pci_dev *pdev = cfg->dev;
  1422. int i, k, rc = 0;
  1423. unsigned int kw_size;
  1424. ssize_t vpd_size;
  1425. char vpd_data[CXLFLASH_VPD_LEN];
  1426. char tmp_buf[WWPN_BUF_LEN] = { 0 };
  1427. const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
  1428. cfg->dev_id->driver_data;
  1429. const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
  1430. const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
  1431. /* Get the VPD data from the device */
  1432. vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
  1433. if (unlikely(vpd_size <= 0)) {
  1434. dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
  1435. __func__, vpd_size);
  1436. rc = -ENODEV;
  1437. goto out;
  1438. }
  1439. /*
  1440. * Find the offset of the WWPN tag within the read only
  1441. * VPD data and validate the found field (partials are
  1442. * no good to us). Convert the ASCII data to an integer
  1443. * value. Note that we must copy to a temporary buffer
  1444. * because the conversion service requires that the ASCII
  1445. * string be terminated.
  1446. *
  1447. * Allow for WWPN not being found for all devices, setting
  1448. * the returned WWPN to zero when not found. Notify with a
  1449. * log error for cards that should have had WWPN keywords
  1450. * in the VPD - cards requiring WWPN will not have their
  1451. * ports programmed and operate in an undefined state.
  1452. */
  1453. for (k = 0; k < cfg->num_fc_ports; k++) {
  1454. i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
  1455. wwpn_vpd_tags[k], &kw_size);
  1456. if (i == -ENOENT) {
  1457. if (wwpn_vpd_required)
  1458. dev_err(dev, "%s: Port %d WWPN not found\n",
  1459. __func__, k);
  1460. wwpn[k] = 0ULL;
  1461. continue;
  1462. }
  1463. if (i < 0 || kw_size != WWPN_LEN) {
  1464. dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
  1465. __func__, k);
  1466. rc = -ENODEV;
  1467. goto out;
  1468. }
  1469. memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
  1470. rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
  1471. if (unlikely(rc)) {
  1472. dev_err(dev, "%s: WWPN conversion failed for port %d\n",
  1473. __func__, k);
  1474. rc = -ENODEV;
  1475. goto out;
  1476. }
  1477. dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
  1478. }
  1479. out:
  1480. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  1481. return rc;
  1482. }
  1483. /**
  1484. * init_pcr() - initialize the provisioning and control registers
  1485. * @cfg: Internal structure associated with the host.
  1486. *
  1487. * Also sets up fast access to the mapped registers and initializes AFU
  1488. * command fields that never change.
  1489. */
  1490. static void init_pcr(struct cxlflash_cfg *cfg)
  1491. {
  1492. struct afu *afu = cfg->afu;
  1493. struct sisl_ctrl_map __iomem *ctrl_map;
  1494. struct hwq *hwq;
  1495. void *cookie;
  1496. int i;
  1497. for (i = 0; i < MAX_CONTEXT; i++) {
  1498. ctrl_map = &afu->afu_map->ctrls[i].ctrl;
  1499. /* Disrupt any clients that could be running */
  1500. /* e.g. clients that survived a master restart */
  1501. writeq_be(0, &ctrl_map->rht_start);
  1502. writeq_be(0, &ctrl_map->rht_cnt_id);
  1503. writeq_be(0, &ctrl_map->ctx_cap);
  1504. }
  1505. /* Copy frequently used fields into hwq */
  1506. for (i = 0; i < afu->num_hwqs; i++) {
  1507. hwq = get_hwq(afu, i);
  1508. cookie = hwq->ctx_cookie;
  1509. hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
  1510. hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
  1511. hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
  1512. /* Program the Endian Control for the master context */
  1513. writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
  1514. }
  1515. }
  1516. /**
  1517. * init_global() - initialize AFU global registers
  1518. * @cfg: Internal structure associated with the host.
  1519. */
  1520. static int init_global(struct cxlflash_cfg *cfg)
  1521. {
  1522. struct afu *afu = cfg->afu;
  1523. struct device *dev = &cfg->dev->dev;
  1524. struct hwq *hwq;
  1525. struct sisl_host_map __iomem *hmap;
  1526. __be64 __iomem *fc_port_regs;
  1527. u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
  1528. int i = 0, num_ports = 0;
  1529. int rc = 0;
  1530. int j;
  1531. void *ctx;
  1532. u64 reg;
  1533. rc = read_vpd(cfg, &wwpn[0]);
  1534. if (rc) {
  1535. dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
  1536. goto out;
  1537. }
  1538. /* Set up RRQ and SQ in HWQ for master issued cmds */
  1539. for (i = 0; i < afu->num_hwqs; i++) {
  1540. hwq = get_hwq(afu, i);
  1541. hmap = hwq->host_map;
  1542. writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
  1543. writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
  1544. hwq->hrrq_online = true;
  1545. if (afu_is_sq_cmd_mode(afu)) {
  1546. writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
  1547. writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
  1548. }
  1549. }
  1550. /* AFU configuration */
  1551. reg = readq_be(&afu->afu_map->global.regs.afu_config);
  1552. reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
  1553. /* enable all auto retry options and control endianness */
  1554. /* leave others at default: */
  1555. /* CTX_CAP write protected, mbox_r does not clear on read and */
  1556. /* checker on if dual afu */
  1557. writeq_be(reg, &afu->afu_map->global.regs.afu_config);
  1558. /* Global port select: select either port */
  1559. if (afu->internal_lun) {
  1560. /* Only use port 0 */
  1561. writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
  1562. num_ports = 0;
  1563. } else {
  1564. writeq_be(PORT_MASK(cfg->num_fc_ports),
  1565. &afu->afu_map->global.regs.afu_port_sel);
  1566. num_ports = cfg->num_fc_ports;
  1567. }
  1568. for (i = 0; i < num_ports; i++) {
  1569. fc_port_regs = get_fc_port_regs(cfg, i);
  1570. /* Unmask all errors (but they are still masked at AFU) */
  1571. writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
  1572. /* Clear CRC error cnt & set a threshold */
  1573. (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
  1574. writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
  1575. /* Set WWPNs. If already programmed, wwpn[i] is 0 */
  1576. if (wwpn[i] != 0)
  1577. afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
  1578. /* Programming WWPN back to back causes additional
  1579. * offline/online transitions and a PLOGI
  1580. */
  1581. msleep(100);
  1582. }
  1583. if (afu_is_ocxl_lisn(afu)) {
  1584. /* Set up the LISN effective address for each master */
  1585. for (i = 0; i < afu->num_hwqs; i++) {
  1586. hwq = get_hwq(afu, i);
  1587. ctx = hwq->ctx_cookie;
  1588. for (j = 0; j < hwq->num_irqs; j++) {
  1589. reg = cfg->ops->get_irq_objhndl(ctx, j);
  1590. writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
  1591. }
  1592. reg = hwq->ctx_hndl;
  1593. writeq_be(SISL_LISN_PASID(reg, reg),
  1594. &hwq->ctrl_map->lisn_pasid[0]);
  1595. writeq_be(SISL_LISN_PASID(0UL, reg),
  1596. &hwq->ctrl_map->lisn_pasid[1]);
  1597. }
  1598. }
  1599. /* Set up master's own CTX_CAP to allow real mode, host translation */
  1600. /* tables, afu cmds and read/write GSCSI cmds. */
  1601. /* First, unlock ctx_cap write by reading mbox */
  1602. for (i = 0; i < afu->num_hwqs; i++) {
  1603. hwq = get_hwq(afu, i);
  1604. (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
  1605. writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
  1606. SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
  1607. SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
  1608. &hwq->ctrl_map->ctx_cap);
  1609. }
  1610. /*
  1611. * Determine write-same unmap support for host by evaluating the unmap
  1612. * sector support bit of the context control register associated with
  1613. * the primary hardware queue. Note that while this status is reflected
  1614. * in a context register, the outcome can be assumed to be host-wide.
  1615. */
  1616. hwq = get_hwq(afu, PRIMARY_HWQ);
  1617. reg = readq_be(&hwq->host_map->ctx_ctrl);
  1618. if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
  1619. cfg->ws_unmap = true;
  1620. /* Initialize heartbeat */
  1621. afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
  1622. out:
  1623. return rc;
  1624. }
  1625. /**
  1626. * start_afu() - initializes and starts the AFU
  1627. * @cfg: Internal structure associated with the host.
  1628. */
  1629. static int start_afu(struct cxlflash_cfg *cfg)
  1630. {
  1631. struct afu *afu = cfg->afu;
  1632. struct device *dev = &cfg->dev->dev;
  1633. struct hwq *hwq;
  1634. int rc = 0;
  1635. int i;
  1636. init_pcr(cfg);
  1637. /* Initialize each HWQ */
  1638. for (i = 0; i < afu->num_hwqs; i++) {
  1639. hwq = get_hwq(afu, i);
  1640. /* After an AFU reset, RRQ entries are stale, clear them */
  1641. memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
  1642. /* Initialize RRQ pointers */
  1643. hwq->hrrq_start = &hwq->rrq_entry[0];
  1644. hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
  1645. hwq->hrrq_curr = hwq->hrrq_start;
  1646. hwq->toggle = 1;
  1647. /* Initialize spin locks */
  1648. spin_lock_init(&hwq->hrrq_slock);
  1649. spin_lock_init(&hwq->hsq_slock);
  1650. /* Initialize SQ */
  1651. if (afu_is_sq_cmd_mode(afu)) {
  1652. memset(&hwq->sq, 0, sizeof(hwq->sq));
  1653. hwq->hsq_start = &hwq->sq[0];
  1654. hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
  1655. hwq->hsq_curr = hwq->hsq_start;
  1656. atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
  1657. }
  1658. /* Initialize IRQ poll */
  1659. if (afu_is_irqpoll_enabled(afu))
  1660. irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
  1661. cxlflash_irqpoll);
  1662. }
  1663. rc = init_global(cfg);
  1664. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  1665. return rc;
  1666. }
  1667. /**
  1668. * init_intr() - setup interrupt handlers for the master context
  1669. * @cfg: Internal structure associated with the host.
  1670. * @hwq: Hardware queue to initialize.
  1671. *
  1672. * Return: 0 on success, -errno on failure
  1673. */
  1674. static enum undo_level init_intr(struct cxlflash_cfg *cfg,
  1675. struct hwq *hwq)
  1676. {
  1677. struct device *dev = &cfg->dev->dev;
  1678. void *ctx = hwq->ctx_cookie;
  1679. int rc = 0;
  1680. enum undo_level level = UNDO_NOOP;
  1681. bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
  1682. int num_irqs = hwq->num_irqs;
  1683. rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
  1684. if (unlikely(rc)) {
  1685. dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
  1686. __func__, rc);
  1687. level = UNDO_NOOP;
  1688. goto out;
  1689. }
  1690. rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
  1691. "SISL_MSI_SYNC_ERROR");
  1692. if (unlikely(rc <= 0)) {
  1693. dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
  1694. level = FREE_IRQ;
  1695. goto out;
  1696. }
  1697. rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
  1698. "SISL_MSI_RRQ_UPDATED");
  1699. if (unlikely(rc <= 0)) {
  1700. dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
  1701. level = UNMAP_ONE;
  1702. goto out;
  1703. }
  1704. /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
  1705. if (!is_primary_hwq)
  1706. goto out;
  1707. rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
  1708. "SISL_MSI_ASYNC_ERROR");
  1709. if (unlikely(rc <= 0)) {
  1710. dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
  1711. level = UNMAP_TWO;
  1712. goto out;
  1713. }
  1714. out:
  1715. return level;
  1716. }
  1717. /**
  1718. * init_mc() - create and register as the master context
  1719. * @cfg: Internal structure associated with the host.
  1720. * @index: HWQ Index of the master context.
  1721. *
  1722. * Return: 0 on success, -errno on failure
  1723. */
  1724. static int init_mc(struct cxlflash_cfg *cfg, u32 index)
  1725. {
  1726. void *ctx;
  1727. struct device *dev = &cfg->dev->dev;
  1728. struct hwq *hwq = get_hwq(cfg->afu, index);
  1729. int rc = 0;
  1730. int num_irqs;
  1731. enum undo_level level;
  1732. hwq->afu = cfg->afu;
  1733. hwq->index = index;
  1734. INIT_LIST_HEAD(&hwq->pending_cmds);
  1735. if (index == PRIMARY_HWQ) {
  1736. ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
  1737. num_irqs = 3;
  1738. } else {
  1739. ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
  1740. num_irqs = 2;
  1741. }
  1742. if (IS_ERR_OR_NULL(ctx)) {
  1743. rc = -ENOMEM;
  1744. goto err1;
  1745. }
  1746. WARN_ON(hwq->ctx_cookie);
  1747. hwq->ctx_cookie = ctx;
  1748. hwq->num_irqs = num_irqs;
  1749. /* Set it up as a master with the CXL */
  1750. cfg->ops->set_master(ctx);
  1751. /* Reset AFU when initializing primary context */
  1752. if (index == PRIMARY_HWQ) {
  1753. rc = cfg->ops->afu_reset(ctx);
  1754. if (unlikely(rc)) {
  1755. dev_err(dev, "%s: AFU reset failed rc=%d\n",
  1756. __func__, rc);
  1757. goto err1;
  1758. }
  1759. }
  1760. level = init_intr(cfg, hwq);
  1761. if (unlikely(level)) {
  1762. dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
  1763. goto err2;
  1764. }
  1765. /* Finally, activate the context by starting it */
  1766. rc = cfg->ops->start_context(hwq->ctx_cookie);
  1767. if (unlikely(rc)) {
  1768. dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
  1769. level = UNMAP_THREE;
  1770. goto err2;
  1771. }
  1772. out:
  1773. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  1774. return rc;
  1775. err2:
  1776. term_intr(cfg, level, index);
  1777. if (index != PRIMARY_HWQ)
  1778. cfg->ops->release_context(ctx);
  1779. err1:
  1780. hwq->ctx_cookie = NULL;
  1781. goto out;
  1782. }
  1783. /**
  1784. * get_num_afu_ports() - determines and configures the number of AFU ports
  1785. * @cfg: Internal structure associated with the host.
  1786. *
  1787. * This routine determines the number of AFU ports by converting the global
  1788. * port selection mask. The converted value is only valid following an AFU
  1789. * reset (explicit or power-on). This routine must be invoked shortly after
  1790. * mapping as other routines are dependent on the number of ports during the
  1791. * initialization sequence.
  1792. *
  1793. * To support legacy AFUs that might not have reflected an initial global
  1794. * port mask (value read is 0), default to the number of ports originally
  1795. * supported by the cxlflash driver (2) before hardware with other port
  1796. * offerings was introduced.
  1797. */
  1798. static void get_num_afu_ports(struct cxlflash_cfg *cfg)
  1799. {
  1800. struct afu *afu = cfg->afu;
  1801. struct device *dev = &cfg->dev->dev;
  1802. u64 port_mask;
  1803. int num_fc_ports = LEGACY_FC_PORTS;
  1804. port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
  1805. if (port_mask != 0ULL)
  1806. num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
  1807. dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
  1808. __func__, port_mask, num_fc_ports);
  1809. cfg->num_fc_ports = num_fc_ports;
  1810. cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
  1811. }
  1812. /**
  1813. * init_afu() - setup as master context and start AFU
  1814. * @cfg: Internal structure associated with the host.
  1815. *
  1816. * This routine is a higher level of control for configuring the
  1817. * AFU on probe and reset paths.
  1818. *
  1819. * Return: 0 on success, -errno on failure
  1820. */
  1821. static int init_afu(struct cxlflash_cfg *cfg)
  1822. {
  1823. u64 reg;
  1824. int rc = 0;
  1825. struct afu *afu = cfg->afu;
  1826. struct device *dev = &cfg->dev->dev;
  1827. struct hwq *hwq;
  1828. int i;
  1829. cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
  1830. mutex_init(&afu->sync_active);
  1831. afu->num_hwqs = afu->desired_hwqs;
  1832. for (i = 0; i < afu->num_hwqs; i++) {
  1833. rc = init_mc(cfg, i);
  1834. if (rc) {
  1835. dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
  1836. __func__, rc, i);
  1837. goto err1;
  1838. }
  1839. }
  1840. /* Map the entire MMIO space of the AFU using the first context */
  1841. hwq = get_hwq(afu, PRIMARY_HWQ);
  1842. afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
  1843. if (!afu->afu_map) {
  1844. dev_err(dev, "%s: psa_map failed\n", __func__);
  1845. rc = -ENOMEM;
  1846. goto err1;
  1847. }
  1848. /* No byte reverse on reading afu_version or string will be backwards */
  1849. reg = readq(&afu->afu_map->global.regs.afu_version);
  1850. memcpy(afu->version, &reg, sizeof(reg));
  1851. afu->interface_version =
  1852. readq_be(&afu->afu_map->global.regs.interface_version);
  1853. if ((afu->interface_version + 1) == 0) {
  1854. dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
  1855. "interface version %016llx\n", afu->version,
  1856. afu->interface_version);
  1857. rc = -EINVAL;
  1858. goto err1;
  1859. }
  1860. if (afu_is_sq_cmd_mode(afu)) {
  1861. afu->send_cmd = send_cmd_sq;
  1862. afu->context_reset = context_reset_sq;
  1863. } else {
  1864. afu->send_cmd = send_cmd_ioarrin;
  1865. afu->context_reset = context_reset_ioarrin;
  1866. }
  1867. dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
  1868. afu->version, afu->interface_version);
  1869. get_num_afu_ports(cfg);
  1870. rc = start_afu(cfg);
  1871. if (rc) {
  1872. dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
  1873. goto err1;
  1874. }
  1875. afu_err_intr_init(cfg->afu);
  1876. for (i = 0; i < afu->num_hwqs; i++) {
  1877. hwq = get_hwq(afu, i);
  1878. hwq->room = readq_be(&hwq->host_map->cmd_room);
  1879. }
  1880. /* Restore the LUN mappings */
  1881. cxlflash_restore_luntable(cfg);
  1882. out:
  1883. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  1884. return rc;
  1885. err1:
  1886. for (i = afu->num_hwqs - 1; i >= 0; i--) {
  1887. term_intr(cfg, UNMAP_THREE, i);
  1888. term_mc(cfg, i);
  1889. }
  1890. goto out;
  1891. }
  1892. /**
  1893. * afu_reset() - resets the AFU
  1894. * @cfg: Internal structure associated with the host.
  1895. *
  1896. * Return: 0 on success, -errno on failure
  1897. */
  1898. static int afu_reset(struct cxlflash_cfg *cfg)
  1899. {
  1900. struct device *dev = &cfg->dev->dev;
  1901. int rc = 0;
  1902. /* Stop the context before the reset. Since the context is
  1903. * no longer available restart it after the reset is complete
  1904. */
  1905. term_afu(cfg);
  1906. rc = init_afu(cfg);
  1907. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  1908. return rc;
  1909. }
  1910. /**
  1911. * drain_ioctls() - wait until all currently executing ioctls have completed
  1912. * @cfg: Internal structure associated with the host.
  1913. *
  1914. * Obtain write access to read/write semaphore that wraps ioctl
  1915. * handling to 'drain' ioctls currently executing.
  1916. */
  1917. static void drain_ioctls(struct cxlflash_cfg *cfg)
  1918. {
  1919. down_write(&cfg->ioctl_rwsem);
  1920. up_write(&cfg->ioctl_rwsem);
  1921. }
  1922. /**
  1923. * cxlflash_async_reset_host() - asynchronous host reset handler
  1924. * @data: Private data provided while scheduling reset.
  1925. * @cookie: Cookie that can be used for checkpointing.
  1926. */
  1927. static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
  1928. {
  1929. struct cxlflash_cfg *cfg = data;
  1930. struct device *dev = &cfg->dev->dev;
  1931. int rc = 0;
  1932. if (cfg->state != STATE_RESET) {
  1933. dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
  1934. __func__, cfg->state);
  1935. goto out;
  1936. }
  1937. drain_ioctls(cfg);
  1938. cxlflash_mark_contexts_error(cfg);
  1939. rc = afu_reset(cfg);
  1940. if (rc)
  1941. cfg->state = STATE_FAILTERM;
  1942. else
  1943. cfg->state = STATE_NORMAL;
  1944. wake_up_all(&cfg->reset_waitq);
  1945. out:
  1946. scsi_unblock_requests(cfg->host);
  1947. }
  1948. /**
  1949. * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
  1950. * @cfg: Internal structure associated with the host.
  1951. */
  1952. static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
  1953. {
  1954. struct device *dev = &cfg->dev->dev;
  1955. if (cfg->state != STATE_NORMAL) {
  1956. dev_dbg(dev, "%s: Not performing reset state=%d\n",
  1957. __func__, cfg->state);
  1958. return;
  1959. }
  1960. cfg->state = STATE_RESET;
  1961. scsi_block_requests(cfg->host);
  1962. cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
  1963. cfg);
  1964. }
  1965. /**
  1966. * send_afu_cmd() - builds and sends an internal AFU command
  1967. * @afu: AFU associated with the host.
  1968. * @rcb: Pre-populated IOARCB describing command to send.
  1969. *
  1970. * The AFU can only take one internal AFU command at a time. This limitation is
  1971. * enforced by using a mutex to provide exclusive access to the AFU during the
  1972. * operation. This design point requires calling threads to not be on interrupt
  1973. * context due to the possibility of sleeping during concurrent AFU operations.
  1974. *
  1975. * The command status is optionally passed back to the caller when the caller
  1976. * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
  1977. *
  1978. * Return:
  1979. * 0 on success, -errno on failure
  1980. */
  1981. static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
  1982. {
  1983. struct cxlflash_cfg *cfg = afu->parent;
  1984. struct device *dev = &cfg->dev->dev;
  1985. struct afu_cmd *cmd = NULL;
  1986. struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
  1987. ulong lock_flags;
  1988. char *buf = NULL;
  1989. int rc = 0;
  1990. int nretry = 0;
  1991. if (cfg->state != STATE_NORMAL) {
  1992. dev_dbg(dev, "%s: Sync not required state=%u\n",
  1993. __func__, cfg->state);
  1994. return 0;
  1995. }
  1996. mutex_lock(&afu->sync_active);
  1997. atomic_inc(&afu->cmds_active);
  1998. buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
  1999. if (unlikely(!buf)) {
  2000. dev_err(dev, "%s: no memory for command\n", __func__);
  2001. rc = -ENOMEM;
  2002. goto out;
  2003. }
  2004. cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
  2005. retry:
  2006. memset(cmd, 0, sizeof(*cmd));
  2007. memcpy(&cmd->rcb, rcb, sizeof(*rcb));
  2008. INIT_LIST_HEAD(&cmd->queue);
  2009. init_completion(&cmd->cevent);
  2010. cmd->parent = afu;
  2011. cmd->hwq_index = hwq->index;
  2012. cmd->rcb.ctx_id = hwq->ctx_hndl;
  2013. dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
  2014. __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
  2015. rc = afu->send_cmd(afu, cmd);
  2016. if (unlikely(rc)) {
  2017. rc = -ENOBUFS;
  2018. goto out;
  2019. }
  2020. rc = wait_resp(afu, cmd);
  2021. switch (rc) {
  2022. case -ETIMEDOUT:
  2023. rc = afu->context_reset(hwq);
  2024. if (rc) {
  2025. /* Delete the command from pending_cmds list */
  2026. spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
  2027. list_del(&cmd->list);
  2028. spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
  2029. cxlflash_schedule_async_reset(cfg);
  2030. break;
  2031. }
  2032. fallthrough; /* to retry */
  2033. case -EAGAIN:
  2034. if (++nretry < 2)
  2035. goto retry;
  2036. fallthrough; /* to exit */
  2037. default:
  2038. break;
  2039. }
  2040. if (rcb->ioasa)
  2041. *rcb->ioasa = cmd->sa;
  2042. out:
  2043. atomic_dec(&afu->cmds_active);
  2044. mutex_unlock(&afu->sync_active);
  2045. kfree(buf);
  2046. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  2047. return rc;
  2048. }
  2049. /**
  2050. * cxlflash_afu_sync() - builds and sends an AFU sync command
  2051. * @afu: AFU associated with the host.
  2052. * @ctx: Identifies context requesting sync.
  2053. * @res: Identifies resource requesting sync.
  2054. * @mode: Type of sync to issue (lightweight, heavyweight, global).
  2055. *
  2056. * AFU sync operations are only necessary and allowed when the device is
  2057. * operating normally. When not operating normally, sync requests can occur as
  2058. * part of cleaning up resources associated with an adapter prior to removal.
  2059. * In this scenario, these requests are simply ignored (safe due to the AFU
  2060. * going away).
  2061. *
  2062. * Return:
  2063. * 0 on success, -errno on failure
  2064. */
  2065. int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
  2066. {
  2067. struct cxlflash_cfg *cfg = afu->parent;
  2068. struct device *dev = &cfg->dev->dev;
  2069. struct sisl_ioarcb rcb = { 0 };
  2070. dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
  2071. __func__, afu, ctx, res, mode);
  2072. rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
  2073. rcb.msi = SISL_MSI_RRQ_UPDATED;
  2074. rcb.timeout = MC_AFU_SYNC_TIMEOUT;
  2075. rcb.cdb[0] = SISL_AFU_CMD_SYNC;
  2076. rcb.cdb[1] = mode;
  2077. put_unaligned_be16(ctx, &rcb.cdb[2]);
  2078. put_unaligned_be32(res, &rcb.cdb[4]);
  2079. return send_afu_cmd(afu, &rcb);
  2080. }
  2081. /**
  2082. * cxlflash_eh_abort_handler() - abort a SCSI command
  2083. * @scp: SCSI command to abort.
  2084. *
  2085. * CXL Flash devices do not support a single command abort. Reset the context
  2086. * as per SISLite specification. Flush any pending commands in the hardware
  2087. * queue before the reset.
  2088. *
  2089. * Return: SUCCESS/FAILED as defined in scsi/scsi.h
  2090. */
  2091. static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
  2092. {
  2093. int rc = FAILED;
  2094. struct Scsi_Host *host = scp->device->host;
  2095. struct cxlflash_cfg *cfg = shost_priv(host);
  2096. struct afu_cmd *cmd = sc_to_afuc(scp);
  2097. struct device *dev = &cfg->dev->dev;
  2098. struct afu *afu = cfg->afu;
  2099. struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
  2100. dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
  2101. "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
  2102. scp->device->channel, scp->device->id, scp->device->lun,
  2103. get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
  2104. get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
  2105. get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
  2106. get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
  2107. /* When the state is not normal, another reset/reload is in progress.
  2108. * Return failed and the mid-layer will invoke host reset handler.
  2109. */
  2110. if (cfg->state != STATE_NORMAL) {
  2111. dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
  2112. __func__, cfg->state);
  2113. goto out;
  2114. }
  2115. rc = afu->context_reset(hwq);
  2116. if (unlikely(rc))
  2117. goto out;
  2118. rc = SUCCESS;
  2119. out:
  2120. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  2121. return rc;
  2122. }
  2123. /**
  2124. * cxlflash_eh_device_reset_handler() - reset a single LUN
  2125. * @scp: SCSI command to send.
  2126. *
  2127. * Return:
  2128. * SUCCESS as defined in scsi/scsi.h
  2129. * FAILED as defined in scsi/scsi.h
  2130. */
  2131. static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
  2132. {
  2133. int rc = SUCCESS;
  2134. struct scsi_device *sdev = scp->device;
  2135. struct Scsi_Host *host = sdev->host;
  2136. struct cxlflash_cfg *cfg = shost_priv(host);
  2137. struct device *dev = &cfg->dev->dev;
  2138. int rcr = 0;
  2139. dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
  2140. host->host_no, sdev->channel, sdev->id, sdev->lun);
  2141. retry:
  2142. switch (cfg->state) {
  2143. case STATE_NORMAL:
  2144. rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
  2145. if (unlikely(rcr))
  2146. rc = FAILED;
  2147. break;
  2148. case STATE_RESET:
  2149. wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
  2150. goto retry;
  2151. default:
  2152. rc = FAILED;
  2153. break;
  2154. }
  2155. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  2156. return rc;
  2157. }
  2158. /**
  2159. * cxlflash_eh_host_reset_handler() - reset the host adapter
  2160. * @scp: SCSI command from stack identifying host.
  2161. *
  2162. * Following a reset, the state is evaluated again in case an EEH occurred
  2163. * during the reset. In such a scenario, the host reset will either yield
  2164. * until the EEH recovery is complete or return success or failure based
  2165. * upon the current device state.
  2166. *
  2167. * Return:
  2168. * SUCCESS as defined in scsi/scsi.h
  2169. * FAILED as defined in scsi/scsi.h
  2170. */
  2171. static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
  2172. {
  2173. int rc = SUCCESS;
  2174. int rcr = 0;
  2175. struct Scsi_Host *host = scp->device->host;
  2176. struct cxlflash_cfg *cfg = shost_priv(host);
  2177. struct device *dev = &cfg->dev->dev;
  2178. dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
  2179. switch (cfg->state) {
  2180. case STATE_NORMAL:
  2181. cfg->state = STATE_RESET;
  2182. drain_ioctls(cfg);
  2183. cxlflash_mark_contexts_error(cfg);
  2184. rcr = afu_reset(cfg);
  2185. if (rcr) {
  2186. rc = FAILED;
  2187. cfg->state = STATE_FAILTERM;
  2188. } else
  2189. cfg->state = STATE_NORMAL;
  2190. wake_up_all(&cfg->reset_waitq);
  2191. ssleep(1);
  2192. fallthrough;
  2193. case STATE_RESET:
  2194. wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
  2195. if (cfg->state == STATE_NORMAL)
  2196. break;
  2197. fallthrough;
  2198. default:
  2199. rc = FAILED;
  2200. break;
  2201. }
  2202. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  2203. return rc;
  2204. }
  2205. /**
  2206. * cxlflash_change_queue_depth() - change the queue depth for the device
  2207. * @sdev: SCSI device destined for queue depth change.
  2208. * @qdepth: Requested queue depth value to set.
  2209. *
  2210. * The requested queue depth is capped to the maximum supported value.
  2211. *
  2212. * Return: The actual queue depth set.
  2213. */
  2214. static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
  2215. {
  2216. if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
  2217. qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
  2218. scsi_change_queue_depth(sdev, qdepth);
  2219. return sdev->queue_depth;
  2220. }
  2221. /**
  2222. * cxlflash_show_port_status() - queries and presents the current port status
  2223. * @port: Desired port for status reporting.
  2224. * @cfg: Internal structure associated with the host.
  2225. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2226. *
  2227. * Return: The size of the ASCII string returned in @buf or -EINVAL.
  2228. */
  2229. static ssize_t cxlflash_show_port_status(u32 port,
  2230. struct cxlflash_cfg *cfg,
  2231. char *buf)
  2232. {
  2233. struct device *dev = &cfg->dev->dev;
  2234. char *disp_status;
  2235. u64 status;
  2236. __be64 __iomem *fc_port_regs;
  2237. WARN_ON(port >= MAX_FC_PORTS);
  2238. if (port >= cfg->num_fc_ports) {
  2239. dev_info(dev, "%s: Port %d not supported on this card.\n",
  2240. __func__, port);
  2241. return -EINVAL;
  2242. }
  2243. fc_port_regs = get_fc_port_regs(cfg, port);
  2244. status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
  2245. status &= FC_MTIP_STATUS_MASK;
  2246. if (status == FC_MTIP_STATUS_ONLINE)
  2247. disp_status = "online";
  2248. else if (status == FC_MTIP_STATUS_OFFLINE)
  2249. disp_status = "offline";
  2250. else
  2251. disp_status = "unknown";
  2252. return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
  2253. }
  2254. /**
  2255. * port0_show() - queries and presents the current status of port 0
  2256. * @dev: Generic device associated with the host owning the port.
  2257. * @attr: Device attribute representing the port.
  2258. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2259. *
  2260. * Return: The size of the ASCII string returned in @buf.
  2261. */
  2262. static ssize_t port0_show(struct device *dev,
  2263. struct device_attribute *attr,
  2264. char *buf)
  2265. {
  2266. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2267. return cxlflash_show_port_status(0, cfg, buf);
  2268. }
  2269. /**
  2270. * port1_show() - queries and presents the current status of port 1
  2271. * @dev: Generic device associated with the host owning the port.
  2272. * @attr: Device attribute representing the port.
  2273. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2274. *
  2275. * Return: The size of the ASCII string returned in @buf.
  2276. */
  2277. static ssize_t port1_show(struct device *dev,
  2278. struct device_attribute *attr,
  2279. char *buf)
  2280. {
  2281. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2282. return cxlflash_show_port_status(1, cfg, buf);
  2283. }
  2284. /**
  2285. * port2_show() - queries and presents the current status of port 2
  2286. * @dev: Generic device associated with the host owning the port.
  2287. * @attr: Device attribute representing the port.
  2288. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2289. *
  2290. * Return: The size of the ASCII string returned in @buf.
  2291. */
  2292. static ssize_t port2_show(struct device *dev,
  2293. struct device_attribute *attr,
  2294. char *buf)
  2295. {
  2296. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2297. return cxlflash_show_port_status(2, cfg, buf);
  2298. }
  2299. /**
  2300. * port3_show() - queries and presents the current status of port 3
  2301. * @dev: Generic device associated with the host owning the port.
  2302. * @attr: Device attribute representing the port.
  2303. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2304. *
  2305. * Return: The size of the ASCII string returned in @buf.
  2306. */
  2307. static ssize_t port3_show(struct device *dev,
  2308. struct device_attribute *attr,
  2309. char *buf)
  2310. {
  2311. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2312. return cxlflash_show_port_status(3, cfg, buf);
  2313. }
  2314. /**
  2315. * lun_mode_show() - presents the current LUN mode of the host
  2316. * @dev: Generic device associated with the host.
  2317. * @attr: Device attribute representing the LUN mode.
  2318. * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
  2319. *
  2320. * Return: The size of the ASCII string returned in @buf.
  2321. */
  2322. static ssize_t lun_mode_show(struct device *dev,
  2323. struct device_attribute *attr, char *buf)
  2324. {
  2325. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2326. struct afu *afu = cfg->afu;
  2327. return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
  2328. }
  2329. /**
  2330. * lun_mode_store() - sets the LUN mode of the host
  2331. * @dev: Generic device associated with the host.
  2332. * @attr: Device attribute representing the LUN mode.
  2333. * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
  2334. * @count: Length of data resizing in @buf.
  2335. *
  2336. * The CXL Flash AFU supports a dummy LUN mode where the external
  2337. * links and storage are not required. Space on the FPGA is used
  2338. * to create 1 or 2 small LUNs which are presented to the system
  2339. * as if they were a normal storage device. This feature is useful
  2340. * during development and also provides manufacturing with a way
  2341. * to test the AFU without an actual device.
  2342. *
  2343. * 0 = external LUN[s] (default)
  2344. * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
  2345. * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
  2346. * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
  2347. * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
  2348. *
  2349. * Return: The size of the ASCII string returned in @buf.
  2350. */
  2351. static ssize_t lun_mode_store(struct device *dev,
  2352. struct device_attribute *attr,
  2353. const char *buf, size_t count)
  2354. {
  2355. struct Scsi_Host *shost = class_to_shost(dev);
  2356. struct cxlflash_cfg *cfg = shost_priv(shost);
  2357. struct afu *afu = cfg->afu;
  2358. int rc;
  2359. u32 lun_mode;
  2360. rc = kstrtouint(buf, 10, &lun_mode);
  2361. if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
  2362. afu->internal_lun = lun_mode;
  2363. /*
  2364. * When configured for internal LUN, there is only one channel,
  2365. * channel number 0, else there will be one less than the number
  2366. * of fc ports for this card.
  2367. */
  2368. if (afu->internal_lun)
  2369. shost->max_channel = 0;
  2370. else
  2371. shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
  2372. afu_reset(cfg);
  2373. scsi_scan_host(cfg->host);
  2374. }
  2375. return count;
  2376. }
  2377. /**
  2378. * ioctl_version_show() - presents the current ioctl version of the host
  2379. * @dev: Generic device associated with the host.
  2380. * @attr: Device attribute representing the ioctl version.
  2381. * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
  2382. *
  2383. * Return: The size of the ASCII string returned in @buf.
  2384. */
  2385. static ssize_t ioctl_version_show(struct device *dev,
  2386. struct device_attribute *attr, char *buf)
  2387. {
  2388. ssize_t bytes = 0;
  2389. bytes = scnprintf(buf, PAGE_SIZE,
  2390. "disk: %u\n", DK_CXLFLASH_VERSION_0);
  2391. bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
  2392. "host: %u\n", HT_CXLFLASH_VERSION_0);
  2393. return bytes;
  2394. }
  2395. /**
  2396. * cxlflash_show_port_lun_table() - queries and presents the port LUN table
  2397. * @port: Desired port for status reporting.
  2398. * @cfg: Internal structure associated with the host.
  2399. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2400. *
  2401. * Return: The size of the ASCII string returned in @buf or -EINVAL.
  2402. */
  2403. static ssize_t cxlflash_show_port_lun_table(u32 port,
  2404. struct cxlflash_cfg *cfg,
  2405. char *buf)
  2406. {
  2407. struct device *dev = &cfg->dev->dev;
  2408. __be64 __iomem *fc_port_luns;
  2409. int i;
  2410. ssize_t bytes = 0;
  2411. WARN_ON(port >= MAX_FC_PORTS);
  2412. if (port >= cfg->num_fc_ports) {
  2413. dev_info(dev, "%s: Port %d not supported on this card.\n",
  2414. __func__, port);
  2415. return -EINVAL;
  2416. }
  2417. fc_port_luns = get_fc_port_luns(cfg, port);
  2418. for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
  2419. bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
  2420. "%03d: %016llx\n",
  2421. i, readq_be(&fc_port_luns[i]));
  2422. return bytes;
  2423. }
  2424. /**
  2425. * port0_lun_table_show() - presents the current LUN table of port 0
  2426. * @dev: Generic device associated with the host owning the port.
  2427. * @attr: Device attribute representing the port.
  2428. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2429. *
  2430. * Return: The size of the ASCII string returned in @buf.
  2431. */
  2432. static ssize_t port0_lun_table_show(struct device *dev,
  2433. struct device_attribute *attr,
  2434. char *buf)
  2435. {
  2436. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2437. return cxlflash_show_port_lun_table(0, cfg, buf);
  2438. }
  2439. /**
  2440. * port1_lun_table_show() - presents the current LUN table of port 1
  2441. * @dev: Generic device associated with the host owning the port.
  2442. * @attr: Device attribute representing the port.
  2443. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2444. *
  2445. * Return: The size of the ASCII string returned in @buf.
  2446. */
  2447. static ssize_t port1_lun_table_show(struct device *dev,
  2448. struct device_attribute *attr,
  2449. char *buf)
  2450. {
  2451. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2452. return cxlflash_show_port_lun_table(1, cfg, buf);
  2453. }
  2454. /**
  2455. * port2_lun_table_show() - presents the current LUN table of port 2
  2456. * @dev: Generic device associated with the host owning the port.
  2457. * @attr: Device attribute representing the port.
  2458. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2459. *
  2460. * Return: The size of the ASCII string returned in @buf.
  2461. */
  2462. static ssize_t port2_lun_table_show(struct device *dev,
  2463. struct device_attribute *attr,
  2464. char *buf)
  2465. {
  2466. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2467. return cxlflash_show_port_lun_table(2, cfg, buf);
  2468. }
  2469. /**
  2470. * port3_lun_table_show() - presents the current LUN table of port 3
  2471. * @dev: Generic device associated with the host owning the port.
  2472. * @attr: Device attribute representing the port.
  2473. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
  2474. *
  2475. * Return: The size of the ASCII string returned in @buf.
  2476. */
  2477. static ssize_t port3_lun_table_show(struct device *dev,
  2478. struct device_attribute *attr,
  2479. char *buf)
  2480. {
  2481. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2482. return cxlflash_show_port_lun_table(3, cfg, buf);
  2483. }
  2484. /**
  2485. * irqpoll_weight_show() - presents the current IRQ poll weight for the host
  2486. * @dev: Generic device associated with the host.
  2487. * @attr: Device attribute representing the IRQ poll weight.
  2488. * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
  2489. * weight in ASCII.
  2490. *
  2491. * An IRQ poll weight of 0 indicates polling is disabled.
  2492. *
  2493. * Return: The size of the ASCII string returned in @buf.
  2494. */
  2495. static ssize_t irqpoll_weight_show(struct device *dev,
  2496. struct device_attribute *attr, char *buf)
  2497. {
  2498. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2499. struct afu *afu = cfg->afu;
  2500. return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
  2501. }
  2502. /**
  2503. * irqpoll_weight_store() - sets the current IRQ poll weight for the host
  2504. * @dev: Generic device associated with the host.
  2505. * @attr: Device attribute representing the IRQ poll weight.
  2506. * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
  2507. * weight in ASCII.
  2508. * @count: Length of data resizing in @buf.
  2509. *
  2510. * An IRQ poll weight of 0 indicates polling is disabled.
  2511. *
  2512. * Return: The size of the ASCII string returned in @buf.
  2513. */
  2514. static ssize_t irqpoll_weight_store(struct device *dev,
  2515. struct device_attribute *attr,
  2516. const char *buf, size_t count)
  2517. {
  2518. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2519. struct device *cfgdev = &cfg->dev->dev;
  2520. struct afu *afu = cfg->afu;
  2521. struct hwq *hwq;
  2522. u32 weight;
  2523. int rc, i;
  2524. rc = kstrtouint(buf, 10, &weight);
  2525. if (rc)
  2526. return -EINVAL;
  2527. if (weight > 256) {
  2528. dev_info(cfgdev,
  2529. "Invalid IRQ poll weight. It must be 256 or less.\n");
  2530. return -EINVAL;
  2531. }
  2532. if (weight == afu->irqpoll_weight) {
  2533. dev_info(cfgdev,
  2534. "Current IRQ poll weight has the same weight.\n");
  2535. return -EINVAL;
  2536. }
  2537. if (afu_is_irqpoll_enabled(afu)) {
  2538. for (i = 0; i < afu->num_hwqs; i++) {
  2539. hwq = get_hwq(afu, i);
  2540. irq_poll_disable(&hwq->irqpoll);
  2541. }
  2542. }
  2543. afu->irqpoll_weight = weight;
  2544. if (weight > 0) {
  2545. for (i = 0; i < afu->num_hwqs; i++) {
  2546. hwq = get_hwq(afu, i);
  2547. irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
  2548. }
  2549. }
  2550. return count;
  2551. }
  2552. /**
  2553. * num_hwqs_show() - presents the number of hardware queues for the host
  2554. * @dev: Generic device associated with the host.
  2555. * @attr: Device attribute representing the number of hardware queues.
  2556. * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
  2557. * queues in ASCII.
  2558. *
  2559. * Return: The size of the ASCII string returned in @buf.
  2560. */
  2561. static ssize_t num_hwqs_show(struct device *dev,
  2562. struct device_attribute *attr, char *buf)
  2563. {
  2564. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2565. struct afu *afu = cfg->afu;
  2566. return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
  2567. }
  2568. /**
  2569. * num_hwqs_store() - sets the number of hardware queues for the host
  2570. * @dev: Generic device associated with the host.
  2571. * @attr: Device attribute representing the number of hardware queues.
  2572. * @buf: Buffer of length PAGE_SIZE containing the number of hardware
  2573. * queues in ASCII.
  2574. * @count: Length of data resizing in @buf.
  2575. *
  2576. * n > 0: num_hwqs = n
  2577. * n = 0: num_hwqs = num_online_cpus()
  2578. * n < 0: num_online_cpus() / abs(n)
  2579. *
  2580. * Return: The size of the ASCII string returned in @buf.
  2581. */
  2582. static ssize_t num_hwqs_store(struct device *dev,
  2583. struct device_attribute *attr,
  2584. const char *buf, size_t count)
  2585. {
  2586. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2587. struct afu *afu = cfg->afu;
  2588. int rc;
  2589. int nhwqs, num_hwqs;
  2590. rc = kstrtoint(buf, 10, &nhwqs);
  2591. if (rc)
  2592. return -EINVAL;
  2593. if (nhwqs >= 1)
  2594. num_hwqs = nhwqs;
  2595. else if (nhwqs == 0)
  2596. num_hwqs = num_online_cpus();
  2597. else
  2598. num_hwqs = num_online_cpus() / abs(nhwqs);
  2599. afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
  2600. WARN_ON_ONCE(afu->desired_hwqs == 0);
  2601. retry:
  2602. switch (cfg->state) {
  2603. case STATE_NORMAL:
  2604. cfg->state = STATE_RESET;
  2605. drain_ioctls(cfg);
  2606. cxlflash_mark_contexts_error(cfg);
  2607. rc = afu_reset(cfg);
  2608. if (rc)
  2609. cfg->state = STATE_FAILTERM;
  2610. else
  2611. cfg->state = STATE_NORMAL;
  2612. wake_up_all(&cfg->reset_waitq);
  2613. break;
  2614. case STATE_RESET:
  2615. wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
  2616. if (cfg->state == STATE_NORMAL)
  2617. goto retry;
  2618. fallthrough;
  2619. default:
  2620. /* Ideally should not happen */
  2621. dev_err(dev, "%s: Device is not ready, state=%d\n",
  2622. __func__, cfg->state);
  2623. break;
  2624. }
  2625. return count;
  2626. }
  2627. static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
  2628. /**
  2629. * hwq_mode_show() - presents the HWQ steering mode for the host
  2630. * @dev: Generic device associated with the host.
  2631. * @attr: Device attribute representing the HWQ steering mode.
  2632. * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
  2633. * as a character string.
  2634. *
  2635. * Return: The size of the ASCII string returned in @buf.
  2636. */
  2637. static ssize_t hwq_mode_show(struct device *dev,
  2638. struct device_attribute *attr, char *buf)
  2639. {
  2640. struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
  2641. struct afu *afu = cfg->afu;
  2642. return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
  2643. }
  2644. /**
  2645. * hwq_mode_store() - sets the HWQ steering mode for the host
  2646. * @dev: Generic device associated with the host.
  2647. * @attr: Device attribute representing the HWQ steering mode.
  2648. * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
  2649. * as a character string.
  2650. * @count: Length of data resizing in @buf.
  2651. *
  2652. * rr = Round-Robin
  2653. * tag = Block MQ Tagging
  2654. * cpu = CPU Affinity
  2655. *
  2656. * Return: The size of the ASCII string returned in @buf.
  2657. */
  2658. static ssize_t hwq_mode_store(struct device *dev,
  2659. struct device_attribute *attr,
  2660. const char *buf, size_t count)
  2661. {
  2662. struct Scsi_Host *shost = class_to_shost(dev);
  2663. struct cxlflash_cfg *cfg = shost_priv(shost);
  2664. struct device *cfgdev = &cfg->dev->dev;
  2665. struct afu *afu = cfg->afu;
  2666. int i;
  2667. u32 mode = MAX_HWQ_MODE;
  2668. for (i = 0; i < MAX_HWQ_MODE; i++) {
  2669. if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
  2670. mode = i;
  2671. break;
  2672. }
  2673. }
  2674. if (mode >= MAX_HWQ_MODE) {
  2675. dev_info(cfgdev, "Invalid HWQ steering mode.\n");
  2676. return -EINVAL;
  2677. }
  2678. afu->hwq_mode = mode;
  2679. return count;
  2680. }
  2681. /**
  2682. * mode_show() - presents the current mode of the device
  2683. * @dev: Generic device associated with the device.
  2684. * @attr: Device attribute representing the device mode.
  2685. * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
  2686. *
  2687. * Return: The size of the ASCII string returned in @buf.
  2688. */
  2689. static ssize_t mode_show(struct device *dev,
  2690. struct device_attribute *attr, char *buf)
  2691. {
  2692. struct scsi_device *sdev = to_scsi_device(dev);
  2693. return scnprintf(buf, PAGE_SIZE, "%s\n",
  2694. sdev->hostdata ? "superpipe" : "legacy");
  2695. }
  2696. /*
  2697. * Host attributes
  2698. */
  2699. static DEVICE_ATTR_RO(port0);
  2700. static DEVICE_ATTR_RO(port1);
  2701. static DEVICE_ATTR_RO(port2);
  2702. static DEVICE_ATTR_RO(port3);
  2703. static DEVICE_ATTR_RW(lun_mode);
  2704. static DEVICE_ATTR_RO(ioctl_version);
  2705. static DEVICE_ATTR_RO(port0_lun_table);
  2706. static DEVICE_ATTR_RO(port1_lun_table);
  2707. static DEVICE_ATTR_RO(port2_lun_table);
  2708. static DEVICE_ATTR_RO(port3_lun_table);
  2709. static DEVICE_ATTR_RW(irqpoll_weight);
  2710. static DEVICE_ATTR_RW(num_hwqs);
  2711. static DEVICE_ATTR_RW(hwq_mode);
  2712. static struct attribute *cxlflash_host_attrs[] = {
  2713. &dev_attr_port0.attr,
  2714. &dev_attr_port1.attr,
  2715. &dev_attr_port2.attr,
  2716. &dev_attr_port3.attr,
  2717. &dev_attr_lun_mode.attr,
  2718. &dev_attr_ioctl_version.attr,
  2719. &dev_attr_port0_lun_table.attr,
  2720. &dev_attr_port1_lun_table.attr,
  2721. &dev_attr_port2_lun_table.attr,
  2722. &dev_attr_port3_lun_table.attr,
  2723. &dev_attr_irqpoll_weight.attr,
  2724. &dev_attr_num_hwqs.attr,
  2725. &dev_attr_hwq_mode.attr,
  2726. NULL
  2727. };
  2728. ATTRIBUTE_GROUPS(cxlflash_host);
  2729. /*
  2730. * Device attributes
  2731. */
  2732. static DEVICE_ATTR_RO(mode);
  2733. static struct attribute *cxlflash_dev_attrs[] = {
  2734. &dev_attr_mode.attr,
  2735. NULL
  2736. };
  2737. ATTRIBUTE_GROUPS(cxlflash_dev);
  2738. /*
  2739. * Host template
  2740. */
  2741. static struct scsi_host_template driver_template = {
  2742. .module = THIS_MODULE,
  2743. .name = CXLFLASH_ADAPTER_NAME,
  2744. .info = cxlflash_driver_info,
  2745. .ioctl = cxlflash_ioctl,
  2746. .proc_name = CXLFLASH_NAME,
  2747. .queuecommand = cxlflash_queuecommand,
  2748. .eh_abort_handler = cxlflash_eh_abort_handler,
  2749. .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
  2750. .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
  2751. .change_queue_depth = cxlflash_change_queue_depth,
  2752. .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
  2753. .can_queue = CXLFLASH_MAX_CMDS,
  2754. .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
  2755. .this_id = -1,
  2756. .sg_tablesize = 1, /* No scatter gather support */
  2757. .max_sectors = CXLFLASH_MAX_SECTORS,
  2758. .shost_groups = cxlflash_host_groups,
  2759. .sdev_groups = cxlflash_dev_groups,
  2760. };
  2761. /*
  2762. * Device dependent values
  2763. */
  2764. static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
  2765. CXLFLASH_WWPN_VPD_REQUIRED };
  2766. static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
  2767. CXLFLASH_NOTIFY_SHUTDOWN };
  2768. static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
  2769. (CXLFLASH_NOTIFY_SHUTDOWN |
  2770. CXLFLASH_OCXL_DEV) };
  2771. /*
  2772. * PCI device binding table
  2773. */
  2774. static struct pci_device_id cxlflash_pci_table[] = {
  2775. {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
  2776. PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
  2777. {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
  2778. PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
  2779. {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
  2780. PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
  2781. {}
  2782. };
  2783. MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
  2784. /**
  2785. * cxlflash_worker_thread() - work thread handler for the AFU
  2786. * @work: Work structure contained within cxlflash associated with host.
  2787. *
  2788. * Handles the following events:
  2789. * - Link reset which cannot be performed on interrupt context due to
  2790. * blocking up to a few seconds
  2791. * - Rescan the host
  2792. */
  2793. static void cxlflash_worker_thread(struct work_struct *work)
  2794. {
  2795. struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
  2796. work_q);
  2797. struct afu *afu = cfg->afu;
  2798. struct device *dev = &cfg->dev->dev;
  2799. __be64 __iomem *fc_port_regs;
  2800. int port;
  2801. ulong lock_flags;
  2802. /* Avoid MMIO if the device has failed */
  2803. if (cfg->state != STATE_NORMAL)
  2804. return;
  2805. spin_lock_irqsave(cfg->host->host_lock, lock_flags);
  2806. if (cfg->lr_state == LINK_RESET_REQUIRED) {
  2807. port = cfg->lr_port;
  2808. if (port < 0)
  2809. dev_err(dev, "%s: invalid port index %d\n",
  2810. __func__, port);
  2811. else {
  2812. spin_unlock_irqrestore(cfg->host->host_lock,
  2813. lock_flags);
  2814. /* The reset can block... */
  2815. fc_port_regs = get_fc_port_regs(cfg, port);
  2816. afu_link_reset(afu, port, fc_port_regs);
  2817. spin_lock_irqsave(cfg->host->host_lock, lock_flags);
  2818. }
  2819. cfg->lr_state = LINK_RESET_COMPLETE;
  2820. }
  2821. spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
  2822. if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
  2823. scsi_scan_host(cfg->host);
  2824. }
  2825. /**
  2826. * cxlflash_chr_open() - character device open handler
  2827. * @inode: Device inode associated with this character device.
  2828. * @file: File pointer for this device.
  2829. *
  2830. * Only users with admin privileges are allowed to open the character device.
  2831. *
  2832. * Return: 0 on success, -errno on failure
  2833. */
  2834. static int cxlflash_chr_open(struct inode *inode, struct file *file)
  2835. {
  2836. struct cxlflash_cfg *cfg;
  2837. if (!capable(CAP_SYS_ADMIN))
  2838. return -EACCES;
  2839. cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
  2840. file->private_data = cfg;
  2841. return 0;
  2842. }
  2843. /**
  2844. * decode_hioctl() - translates encoded host ioctl to easily identifiable string
  2845. * @cmd: The host ioctl command to decode.
  2846. *
  2847. * Return: A string identifying the decoded host ioctl.
  2848. */
  2849. static char *decode_hioctl(unsigned int cmd)
  2850. {
  2851. switch (cmd) {
  2852. case HT_CXLFLASH_LUN_PROVISION:
  2853. return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
  2854. }
  2855. return "UNKNOWN";
  2856. }
  2857. /**
  2858. * cxlflash_lun_provision() - host LUN provisioning handler
  2859. * @cfg: Internal structure associated with the host.
  2860. * @arg: Kernel copy of userspace ioctl data structure.
  2861. *
  2862. * Return: 0 on success, -errno on failure
  2863. */
  2864. static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, void *arg)
  2865. {
  2866. struct ht_cxlflash_lun_provision *lunprov = arg;
  2867. struct afu *afu = cfg->afu;
  2868. struct device *dev = &cfg->dev->dev;
  2869. struct sisl_ioarcb rcb;
  2870. struct sisl_ioasa asa;
  2871. __be64 __iomem *fc_port_regs;
  2872. u16 port = lunprov->port;
  2873. u16 scmd = lunprov->hdr.subcmd;
  2874. u16 type;
  2875. u64 reg;
  2876. u64 size;
  2877. u64 lun_id;
  2878. int rc = 0;
  2879. if (!afu_is_lun_provision(afu)) {
  2880. rc = -ENOTSUPP;
  2881. goto out;
  2882. }
  2883. if (port >= cfg->num_fc_ports) {
  2884. rc = -EINVAL;
  2885. goto out;
  2886. }
  2887. switch (scmd) {
  2888. case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
  2889. type = SISL_AFU_LUN_PROVISION_CREATE;
  2890. size = lunprov->size;
  2891. lun_id = 0;
  2892. break;
  2893. case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
  2894. type = SISL_AFU_LUN_PROVISION_DELETE;
  2895. size = 0;
  2896. lun_id = lunprov->lun_id;
  2897. break;
  2898. case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
  2899. fc_port_regs = get_fc_port_regs(cfg, port);
  2900. reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
  2901. lunprov->max_num_luns = reg;
  2902. reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
  2903. lunprov->cur_num_luns = reg;
  2904. reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
  2905. lunprov->max_cap_port = reg;
  2906. reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
  2907. lunprov->cur_cap_port = reg;
  2908. goto out;
  2909. default:
  2910. rc = -EINVAL;
  2911. goto out;
  2912. }
  2913. memset(&rcb, 0, sizeof(rcb));
  2914. memset(&asa, 0, sizeof(asa));
  2915. rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
  2916. rcb.lun_id = lun_id;
  2917. rcb.msi = SISL_MSI_RRQ_UPDATED;
  2918. rcb.timeout = MC_LUN_PROV_TIMEOUT;
  2919. rcb.ioasa = &asa;
  2920. rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
  2921. rcb.cdb[1] = type;
  2922. rcb.cdb[2] = port;
  2923. put_unaligned_be64(size, &rcb.cdb[8]);
  2924. rc = send_afu_cmd(afu, &rcb);
  2925. if (rc) {
  2926. dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
  2927. __func__, rc, asa.ioasc, asa.afu_extra);
  2928. goto out;
  2929. }
  2930. if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
  2931. lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
  2932. memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
  2933. }
  2934. out:
  2935. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  2936. return rc;
  2937. }
  2938. /**
  2939. * cxlflash_afu_debug() - host AFU debug handler
  2940. * @cfg: Internal structure associated with the host.
  2941. * @arg: Kernel copy of userspace ioctl data structure.
  2942. *
  2943. * For debug requests requiring a data buffer, always provide an aligned
  2944. * (cache line) buffer to the AFU to appease any alignment requirements.
  2945. *
  2946. * Return: 0 on success, -errno on failure
  2947. */
  2948. static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, void *arg)
  2949. {
  2950. struct ht_cxlflash_afu_debug *afu_dbg = arg;
  2951. struct afu *afu = cfg->afu;
  2952. struct device *dev = &cfg->dev->dev;
  2953. struct sisl_ioarcb rcb;
  2954. struct sisl_ioasa asa;
  2955. char *buf = NULL;
  2956. char *kbuf = NULL;
  2957. void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
  2958. u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
  2959. u32 ulen = afu_dbg->data_len;
  2960. bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
  2961. int rc = 0;
  2962. if (!afu_is_afu_debug(afu)) {
  2963. rc = -ENOTSUPP;
  2964. goto out;
  2965. }
  2966. if (ulen) {
  2967. req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
  2968. if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
  2969. rc = -EINVAL;
  2970. goto out;
  2971. }
  2972. buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
  2973. if (unlikely(!buf)) {
  2974. rc = -ENOMEM;
  2975. goto out;
  2976. }
  2977. kbuf = PTR_ALIGN(buf, cache_line_size());
  2978. if (is_write) {
  2979. req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
  2980. if (copy_from_user(kbuf, ubuf, ulen)) {
  2981. rc = -EFAULT;
  2982. goto out;
  2983. }
  2984. }
  2985. }
  2986. memset(&rcb, 0, sizeof(rcb));
  2987. memset(&asa, 0, sizeof(asa));
  2988. rcb.req_flags = req_flags;
  2989. rcb.msi = SISL_MSI_RRQ_UPDATED;
  2990. rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
  2991. rcb.ioasa = &asa;
  2992. if (ulen) {
  2993. rcb.data_len = ulen;
  2994. rcb.data_ea = (uintptr_t)kbuf;
  2995. }
  2996. rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
  2997. memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
  2998. HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
  2999. rc = send_afu_cmd(afu, &rcb);
  3000. if (rc) {
  3001. dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
  3002. __func__, rc, asa.ioasc, asa.afu_extra);
  3003. goto out;
  3004. }
  3005. if (ulen && !is_write) {
  3006. if (copy_to_user(ubuf, kbuf, ulen))
  3007. rc = -EFAULT;
  3008. }
  3009. out:
  3010. kfree(buf);
  3011. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  3012. return rc;
  3013. }
  3014. /**
  3015. * cxlflash_chr_ioctl() - character device IOCTL handler
  3016. * @file: File pointer for this device.
  3017. * @cmd: IOCTL command.
  3018. * @arg: Userspace ioctl data structure.
  3019. *
  3020. * A read/write semaphore is used to implement a 'drain' of currently
  3021. * running ioctls. The read semaphore is taken at the beginning of each
  3022. * ioctl thread and released upon concluding execution. Additionally the
  3023. * semaphore should be released and then reacquired in any ioctl execution
  3024. * path which will wait for an event to occur that is outside the scope of
  3025. * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
  3026. * a thread simply needs to acquire the write semaphore.
  3027. *
  3028. * Return: 0 on success, -errno on failure
  3029. */
  3030. static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
  3031. unsigned long arg)
  3032. {
  3033. typedef int (*hioctl) (struct cxlflash_cfg *, void *);
  3034. struct cxlflash_cfg *cfg = file->private_data;
  3035. struct device *dev = &cfg->dev->dev;
  3036. char buf[sizeof(union cxlflash_ht_ioctls)];
  3037. void __user *uarg = (void __user *)arg;
  3038. struct ht_cxlflash_hdr *hdr;
  3039. size_t size = 0;
  3040. bool known_ioctl = false;
  3041. int idx = 0;
  3042. int rc = 0;
  3043. hioctl do_ioctl = NULL;
  3044. static const struct {
  3045. size_t size;
  3046. hioctl ioctl;
  3047. } ioctl_tbl[] = { /* NOTE: order matters here */
  3048. { sizeof(struct ht_cxlflash_lun_provision), cxlflash_lun_provision },
  3049. { sizeof(struct ht_cxlflash_afu_debug), cxlflash_afu_debug },
  3050. };
  3051. /* Hold read semaphore so we can drain if needed */
  3052. down_read(&cfg->ioctl_rwsem);
  3053. dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
  3054. __func__, cmd, idx, sizeof(ioctl_tbl));
  3055. switch (cmd) {
  3056. case HT_CXLFLASH_LUN_PROVISION:
  3057. case HT_CXLFLASH_AFU_DEBUG:
  3058. known_ioctl = true;
  3059. idx = _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd);
  3060. size = ioctl_tbl[idx].size;
  3061. do_ioctl = ioctl_tbl[idx].ioctl;
  3062. if (likely(do_ioctl))
  3063. break;
  3064. fallthrough;
  3065. default:
  3066. rc = -EINVAL;
  3067. goto out;
  3068. }
  3069. if (unlikely(copy_from_user(&buf, uarg, size))) {
  3070. dev_err(dev, "%s: copy_from_user() fail "
  3071. "size=%lu cmd=%d (%s) uarg=%p\n",
  3072. __func__, size, cmd, decode_hioctl(cmd), uarg);
  3073. rc = -EFAULT;
  3074. goto out;
  3075. }
  3076. hdr = (struct ht_cxlflash_hdr *)&buf;
  3077. if (hdr->version != HT_CXLFLASH_VERSION_0) {
  3078. dev_dbg(dev, "%s: Version %u not supported for %s\n",
  3079. __func__, hdr->version, decode_hioctl(cmd));
  3080. rc = -EINVAL;
  3081. goto out;
  3082. }
  3083. if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
  3084. dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
  3085. rc = -EINVAL;
  3086. goto out;
  3087. }
  3088. rc = do_ioctl(cfg, (void *)&buf);
  3089. if (likely(!rc))
  3090. if (unlikely(copy_to_user(uarg, &buf, size))) {
  3091. dev_err(dev, "%s: copy_to_user() fail "
  3092. "size=%lu cmd=%d (%s) uarg=%p\n",
  3093. __func__, size, cmd, decode_hioctl(cmd), uarg);
  3094. rc = -EFAULT;
  3095. }
  3096. /* fall through to exit */
  3097. out:
  3098. up_read(&cfg->ioctl_rwsem);
  3099. if (unlikely(rc && known_ioctl))
  3100. dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
  3101. __func__, decode_hioctl(cmd), cmd, rc);
  3102. else
  3103. dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
  3104. __func__, decode_hioctl(cmd), cmd, rc);
  3105. return rc;
  3106. }
  3107. /*
  3108. * Character device file operations
  3109. */
  3110. static const struct file_operations cxlflash_chr_fops = {
  3111. .owner = THIS_MODULE,
  3112. .open = cxlflash_chr_open,
  3113. .unlocked_ioctl = cxlflash_chr_ioctl,
  3114. .compat_ioctl = compat_ptr_ioctl,
  3115. };
  3116. /**
  3117. * init_chrdev() - initialize the character device for the host
  3118. * @cfg: Internal structure associated with the host.
  3119. *
  3120. * Return: 0 on success, -errno on failure
  3121. */
  3122. static int init_chrdev(struct cxlflash_cfg *cfg)
  3123. {
  3124. struct device *dev = &cfg->dev->dev;
  3125. struct device *char_dev;
  3126. dev_t devno;
  3127. int minor;
  3128. int rc = 0;
  3129. minor = cxlflash_get_minor();
  3130. if (unlikely(minor < 0)) {
  3131. dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
  3132. rc = -ENOSPC;
  3133. goto out;
  3134. }
  3135. devno = MKDEV(cxlflash_major, minor);
  3136. cdev_init(&cfg->cdev, &cxlflash_chr_fops);
  3137. rc = cdev_add(&cfg->cdev, devno, 1);
  3138. if (rc) {
  3139. dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
  3140. goto err1;
  3141. }
  3142. char_dev = device_create(&cxlflash_class, NULL, devno,
  3143. NULL, "cxlflash%d", minor);
  3144. if (IS_ERR(char_dev)) {
  3145. rc = PTR_ERR(char_dev);
  3146. dev_err(dev, "%s: device_create failed rc=%d\n",
  3147. __func__, rc);
  3148. goto err2;
  3149. }
  3150. cfg->chardev = char_dev;
  3151. out:
  3152. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  3153. return rc;
  3154. err2:
  3155. cdev_del(&cfg->cdev);
  3156. err1:
  3157. cxlflash_put_minor(minor);
  3158. goto out;
  3159. }
  3160. /**
  3161. * cxlflash_probe() - PCI entry point to add host
  3162. * @pdev: PCI device associated with the host.
  3163. * @dev_id: PCI device id associated with device.
  3164. *
  3165. * The device will initially start out in a 'probing' state and
  3166. * transition to the 'normal' state at the end of a successful
  3167. * probe. Should an EEH event occur during probe, the notification
  3168. * thread (error_detected()) will wait until the probe handler
  3169. * is nearly complete. At that time, the device will be moved to
  3170. * a 'probed' state and the EEH thread woken up to drive the slot
  3171. * reset and recovery (device moves to 'normal' state). Meanwhile,
  3172. * the probe will be allowed to exit successfully.
  3173. *
  3174. * Return: 0 on success, -errno on failure
  3175. */
  3176. static int cxlflash_probe(struct pci_dev *pdev,
  3177. const struct pci_device_id *dev_id)
  3178. {
  3179. struct Scsi_Host *host;
  3180. struct cxlflash_cfg *cfg = NULL;
  3181. struct device *dev = &pdev->dev;
  3182. struct dev_dependent_vals *ddv;
  3183. int rc = 0;
  3184. int k;
  3185. dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
  3186. __func__, pdev->irq);
  3187. ddv = (struct dev_dependent_vals *)dev_id->driver_data;
  3188. driver_template.max_sectors = ddv->max_sectors;
  3189. host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
  3190. if (!host) {
  3191. dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
  3192. rc = -ENOMEM;
  3193. goto out;
  3194. }
  3195. host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
  3196. host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
  3197. host->unique_id = host->host_no;
  3198. host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
  3199. cfg = shost_priv(host);
  3200. cfg->state = STATE_PROBING;
  3201. cfg->host = host;
  3202. rc = alloc_mem(cfg);
  3203. if (rc) {
  3204. dev_err(dev, "%s: alloc_mem failed\n", __func__);
  3205. rc = -ENOMEM;
  3206. scsi_host_put(cfg->host);
  3207. goto out;
  3208. }
  3209. cfg->init_state = INIT_STATE_NONE;
  3210. cfg->dev = pdev;
  3211. cfg->cxl_fops = cxlflash_cxl_fops;
  3212. cfg->ops = cxlflash_assign_ops(ddv);
  3213. WARN_ON_ONCE(!cfg->ops);
  3214. /*
  3215. * Promoted LUNs move to the top of the LUN table. The rest stay on
  3216. * the bottom half. The bottom half grows from the end (index = 255),
  3217. * whereas the top half grows from the beginning (index = 0).
  3218. *
  3219. * Initialize the last LUN index for all possible ports.
  3220. */
  3221. cfg->promote_lun_index = 0;
  3222. for (k = 0; k < MAX_FC_PORTS; k++)
  3223. cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
  3224. cfg->dev_id = (struct pci_device_id *)dev_id;
  3225. init_waitqueue_head(&cfg->tmf_waitq);
  3226. init_waitqueue_head(&cfg->reset_waitq);
  3227. INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
  3228. cfg->lr_state = LINK_RESET_INVALID;
  3229. cfg->lr_port = -1;
  3230. spin_lock_init(&cfg->tmf_slock);
  3231. mutex_init(&cfg->ctx_tbl_list_mutex);
  3232. mutex_init(&cfg->ctx_recovery_mutex);
  3233. init_rwsem(&cfg->ioctl_rwsem);
  3234. INIT_LIST_HEAD(&cfg->ctx_err_recovery);
  3235. INIT_LIST_HEAD(&cfg->lluns);
  3236. pci_set_drvdata(pdev, cfg);
  3237. rc = init_pci(cfg);
  3238. if (rc) {
  3239. dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
  3240. goto out_remove;
  3241. }
  3242. cfg->init_state = INIT_STATE_PCI;
  3243. cfg->afu_cookie = cfg->ops->create_afu(pdev);
  3244. if (unlikely(!cfg->afu_cookie)) {
  3245. dev_err(dev, "%s: create_afu failed\n", __func__);
  3246. rc = -ENOMEM;
  3247. goto out_remove;
  3248. }
  3249. rc = init_afu(cfg);
  3250. if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
  3251. dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
  3252. goto out_remove;
  3253. }
  3254. cfg->init_state = INIT_STATE_AFU;
  3255. rc = init_scsi(cfg);
  3256. if (rc) {
  3257. dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
  3258. goto out_remove;
  3259. }
  3260. cfg->init_state = INIT_STATE_SCSI;
  3261. rc = init_chrdev(cfg);
  3262. if (rc) {
  3263. dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
  3264. goto out_remove;
  3265. }
  3266. cfg->init_state = INIT_STATE_CDEV;
  3267. if (wq_has_sleeper(&cfg->reset_waitq)) {
  3268. cfg->state = STATE_PROBED;
  3269. wake_up_all(&cfg->reset_waitq);
  3270. } else
  3271. cfg->state = STATE_NORMAL;
  3272. out:
  3273. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  3274. return rc;
  3275. out_remove:
  3276. cfg->state = STATE_PROBED;
  3277. cxlflash_remove(pdev);
  3278. goto out;
  3279. }
  3280. /**
  3281. * cxlflash_pci_error_detected() - called when a PCI error is detected
  3282. * @pdev: PCI device struct.
  3283. * @state: PCI channel state.
  3284. *
  3285. * When an EEH occurs during an active reset, wait until the reset is
  3286. * complete and then take action based upon the device state.
  3287. *
  3288. * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
  3289. */
  3290. static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
  3291. pci_channel_state_t state)
  3292. {
  3293. int rc = 0;
  3294. struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
  3295. struct device *dev = &cfg->dev->dev;
  3296. dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
  3297. switch (state) {
  3298. case pci_channel_io_frozen:
  3299. wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
  3300. cfg->state != STATE_PROBING);
  3301. if (cfg->state == STATE_FAILTERM)
  3302. return PCI_ERS_RESULT_DISCONNECT;
  3303. cfg->state = STATE_RESET;
  3304. scsi_block_requests(cfg->host);
  3305. drain_ioctls(cfg);
  3306. rc = cxlflash_mark_contexts_error(cfg);
  3307. if (unlikely(rc))
  3308. dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
  3309. __func__, rc);
  3310. term_afu(cfg);
  3311. return PCI_ERS_RESULT_NEED_RESET;
  3312. case pci_channel_io_perm_failure:
  3313. cfg->state = STATE_FAILTERM;
  3314. wake_up_all(&cfg->reset_waitq);
  3315. scsi_unblock_requests(cfg->host);
  3316. return PCI_ERS_RESULT_DISCONNECT;
  3317. default:
  3318. break;
  3319. }
  3320. return PCI_ERS_RESULT_NEED_RESET;
  3321. }
  3322. /**
  3323. * cxlflash_pci_slot_reset() - called when PCI slot has been reset
  3324. * @pdev: PCI device struct.
  3325. *
  3326. * This routine is called by the pci error recovery code after the PCI
  3327. * slot has been reset, just before we should resume normal operations.
  3328. *
  3329. * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
  3330. */
  3331. static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
  3332. {
  3333. int rc = 0;
  3334. struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
  3335. struct device *dev = &cfg->dev->dev;
  3336. dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
  3337. rc = init_afu(cfg);
  3338. if (unlikely(rc)) {
  3339. dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
  3340. return PCI_ERS_RESULT_DISCONNECT;
  3341. }
  3342. return PCI_ERS_RESULT_RECOVERED;
  3343. }
  3344. /**
  3345. * cxlflash_pci_resume() - called when normal operation can resume
  3346. * @pdev: PCI device struct
  3347. */
  3348. static void cxlflash_pci_resume(struct pci_dev *pdev)
  3349. {
  3350. struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
  3351. struct device *dev = &cfg->dev->dev;
  3352. dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
  3353. cfg->state = STATE_NORMAL;
  3354. wake_up_all(&cfg->reset_waitq);
  3355. scsi_unblock_requests(cfg->host);
  3356. }
  3357. /**
  3358. * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
  3359. * @dev: Character device.
  3360. * @mode: Mode that can be used to verify access.
  3361. *
  3362. * Return: Allocated string describing the devtmpfs structure.
  3363. */
  3364. static char *cxlflash_devnode(const struct device *dev, umode_t *mode)
  3365. {
  3366. return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
  3367. }
  3368. /**
  3369. * cxlflash_class_init() - create character device class
  3370. *
  3371. * Return: 0 on success, -errno on failure
  3372. */
  3373. static int cxlflash_class_init(void)
  3374. {
  3375. dev_t devno;
  3376. int rc = 0;
  3377. rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
  3378. if (unlikely(rc)) {
  3379. pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
  3380. goto out;
  3381. }
  3382. cxlflash_major = MAJOR(devno);
  3383. rc = class_register(&cxlflash_class);
  3384. if (rc) {
  3385. pr_err("%s: class_create failed rc=%d\n", __func__, rc);
  3386. goto err;
  3387. }
  3388. out:
  3389. pr_debug("%s: returning rc=%d\n", __func__, rc);
  3390. return rc;
  3391. err:
  3392. unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
  3393. goto out;
  3394. }
  3395. /**
  3396. * cxlflash_class_exit() - destroy character device class
  3397. */
  3398. static void cxlflash_class_exit(void)
  3399. {
  3400. dev_t devno = MKDEV(cxlflash_major, 0);
  3401. class_unregister(&cxlflash_class);
  3402. unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
  3403. }
  3404. static const struct pci_error_handlers cxlflash_err_handler = {
  3405. .error_detected = cxlflash_pci_error_detected,
  3406. .slot_reset = cxlflash_pci_slot_reset,
  3407. .resume = cxlflash_pci_resume,
  3408. };
  3409. /*
  3410. * PCI device structure
  3411. */
  3412. static struct pci_driver cxlflash_driver = {
  3413. .name = CXLFLASH_NAME,
  3414. .id_table = cxlflash_pci_table,
  3415. .probe = cxlflash_probe,
  3416. .remove = cxlflash_remove,
  3417. .shutdown = cxlflash_remove,
  3418. .err_handler = &cxlflash_err_handler,
  3419. };
  3420. /**
  3421. * init_cxlflash() - module entry point
  3422. *
  3423. * Return: 0 on success, -errno on failure
  3424. */
  3425. static int __init init_cxlflash(void)
  3426. {
  3427. int rc;
  3428. check_sizes();
  3429. cxlflash_list_init();
  3430. rc = cxlflash_class_init();
  3431. if (unlikely(rc))
  3432. goto out;
  3433. rc = pci_register_driver(&cxlflash_driver);
  3434. if (unlikely(rc))
  3435. goto err;
  3436. out:
  3437. pr_debug("%s: returning rc=%d\n", __func__, rc);
  3438. return rc;
  3439. err:
  3440. cxlflash_class_exit();
  3441. goto out;
  3442. }
  3443. /**
  3444. * exit_cxlflash() - module exit point
  3445. */
  3446. static void __exit exit_cxlflash(void)
  3447. {
  3448. cxlflash_term_global_luns();
  3449. cxlflash_free_errpage();
  3450. pci_unregister_driver(&cxlflash_driver);
  3451. cxlflash_class_exit();
  3452. }
  3453. module_init(init_cxlflash);
  3454. module_exit(exit_cxlflash);