qedf_main.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QLogic FCoE Offload Driver
  4. * Copyright (c) 2016-2018 Cavium Inc.
  5. */
  6. #include <linux/init.h>
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/device.h>
  11. #include <linux/highmem.h>
  12. #include <linux/crc32.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/list.h>
  15. #include <linux/kthread.h>
  16. #include <linux/phylink.h>
  17. #include <scsi/libfc.h>
  18. #include <scsi/scsi_host.h>
  19. #include <scsi/fc_frame.h>
  20. #include <linux/if_ether.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/cpu.h>
  23. #include "qedf.h"
  24. #include "qedf_dbg.h"
  25. #include <uapi/linux/pci_regs.h>
  26. const struct qed_fcoe_ops *qed_ops;
  27. static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  28. static void qedf_remove(struct pci_dev *pdev);
  29. static void qedf_shutdown(struct pci_dev *pdev);
  30. static void qedf_schedule_recovery_handler(void *dev);
  31. static void qedf_recovery_handler(struct work_struct *work);
  32. static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
  33. /*
  34. * Driver module parameters.
  35. */
  36. static unsigned int qedf_dev_loss_tmo = 60;
  37. module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
  38. MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
  39. "remote ports (default 60)");
  40. uint qedf_debug = QEDF_LOG_INFO;
  41. module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
  42. MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
  43. " mask");
  44. static uint qedf_fipvlan_retries = 60;
  45. module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
  46. MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
  47. "before giving up (default 60)");
  48. static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
  49. module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
  50. MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
  51. "(default 1002).");
  52. static int qedf_default_prio = -1;
  53. module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
  54. MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
  55. " traffic (value between 0 and 7, default 3).");
  56. uint qedf_dump_frames;
  57. module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
  58. MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
  59. "(default off)");
  60. static uint qedf_queue_depth;
  61. module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
  62. MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
  63. "by the qedf driver. Default is 0 (use OS default).");
  64. uint qedf_io_tracing;
  65. module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
  66. MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
  67. "into trace buffer. (default off).");
  68. static uint qedf_max_lun = MAX_FIBRE_LUNS;
  69. module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
  70. MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
  71. "supports. (default 0xffffffff)");
  72. uint qedf_link_down_tmo;
  73. module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
  74. MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
  75. "link is down by N seconds.");
  76. bool qedf_retry_delay;
  77. module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
  78. MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry "
  79. "delay handling (default off).");
  80. static bool qedf_dcbx_no_wait;
  81. module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
  82. MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
  83. "sending FIP VLAN requests on link up (Default: off).");
  84. static uint qedf_dp_module;
  85. module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
  86. MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed "
  87. "to the qed module during probe.");
  88. static uint qedf_dp_level = QED_LEVEL_NOTICE;
  89. module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
  90. MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
  91. "during probe (0-3: 0 more verbose).");
  92. static bool qedf_enable_recovery = true;
  93. module_param_named(enable_recovery, qedf_enable_recovery,
  94. bool, S_IRUGO | S_IWUSR);
  95. MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
  96. "interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
  97. struct workqueue_struct *qedf_io_wq;
  98. static struct fcoe_percpu_s qedf_global;
  99. static DEFINE_SPINLOCK(qedf_global_lock);
  100. static struct kmem_cache *qedf_io_work_cache;
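/*
 * Combine the discovered VLAN id with the current 802.1p priority and cache
 * it in the qedf context for use in subsequent FIP/FCoE frames.
 */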
  101. void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
  102. {
  103. int vlan_id_tmp = 0;
  104. vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
  105. qedf->vlan_id = vlan_id_tmp;
  106. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  107. "Setting vlan_id=0x%04x prio=%d.\n",
  108. vlan_id_tmp, qedf->prio);
  109. }
  110. /* Returns true if we have a valid vlan, false otherwise */
  111. static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
  112. {
  113. while (qedf->fipvlan_retries--) {
  114. /* This is to catch if link goes down during fipvlan retries */
  115. if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  116. QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
  117. return false;
  118. }
  119. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  120. QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
  121. return false;
  122. }
  123. if (qedf->vlan_id > 0) {
  124. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  125. "vlan = 0x%x already set, calling ctlr_link_up.\n",
  126. qedf->vlan_id);
  127. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
  128. fcoe_ctlr_link_up(&qedf->ctlr);
  129. return true;
  130. }
  131. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  132. "Retry %d.\n", qedf->fipvlan_retries);
  133. init_completion(&qedf->fipvlan_compl);
  134. qedf_fcoe_send_vlan_req(qedf);
  135. wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
  136. }
  137. return false;
  138. }
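/*
 * Delayed-work handler for link events. On link up, run FIP VLAN discovery
 * and fall back to the default VLAN if no response is received before
 * telling libfcoe the link is up; on link down, notify libfcoe and wait for
 * all offloaded sessions to be uploaded.
 */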
  139. static void qedf_handle_link_update(struct work_struct *work)
  140. {
  141. struct qedf_ctx *qedf =
  142. container_of(work, struct qedf_ctx, link_update.work);
  143. int rc;
  144. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
  145. atomic_read(&qedf->link_state));
  146. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
  147. rc = qedf_initiate_fipvlan_req(qedf);
  148. if (rc)
  149. return;
  150. if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
  151. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  152. "Link is down, resetting vlan_id.\n");
  153. qedf->vlan_id = 0;
  154. return;
  155. }
  156. /*
  157. * If we get here then we never received a response to our
  158. * FIP VLAN request, so set the vlan_id to the default and
  159. * tell FCoE that the link is up
  160. */
  161. QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
  162. "response, falling back to default VLAN %d.\n",
  163. qedf_fallback_vlan);
  164. qedf_set_vlan_id(qedf, qedf_fallback_vlan);
  165. /*
  166. * Zero out data_src_addr so we'll update it with the new
  167. * lport port_id
  168. */
  169. eth_zero_addr(qedf->data_src_addr);
  170. fcoe_ctlr_link_up(&qedf->ctlr);
  171. } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  172. /*
  173. * If we hit here and link_down_tmo_valid is still 1 it means
  174. * that link_down_tmo timed out so set it to 0 to make sure any
  175. * other readers have accurate state.
  176. */
  177. atomic_set(&qedf->link_down_tmo_valid, 0);
  178. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  179. "Calling fcoe_ctlr_link_down().\n");
  180. fcoe_ctlr_link_down(&qedf->ctlr);
  181. if (qedf_wait_for_upload(qedf) == false)
  182. QEDF_ERR(&qedf->dbg_ctx,
  183. "Could not upload all sessions.\n");
  184. /* Reset the number of FIP VLAN retries */
  185. qedf->fipvlan_retries = qedf_fipvlan_retries;
  186. }
  187. }
  188. #define QEDF_FCOE_MAC_METHOD_GRANGED_MAC 1
  189. #define QEDF_FCOE_MAC_METHOD_FCF_MAP 2
  190. #define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC 3
  191. static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
  192. {
  193. u8 *granted_mac;
  194. struct fc_frame_header *fh = fc_frame_header_get(fp);
  195. u8 fc_map[3];
  196. int method = 0;
  197. /* Get granted MAC address from FIP FLOGI payload */
  198. granted_mac = fr_cb(fp)->granted_mac;
  199. /*
  200. * We set the source MAC for FCoE traffic based on the Granted MAC
  201. * address from the switch.
  202. *
  203. * If granted_mac is non-zero, we use that.
  204. * If the granted_mac is zeroed out, create the FCoE MAC based on
  205. * the sel_fcf->fc_map and the d_id of the FLOGI frame.
  206. * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
  207. * d_id of the FLOGI frame.
  208. */
  209. if (!is_zero_ether_addr(granted_mac)) {
  210. ether_addr_copy(qedf->data_src_addr, granted_mac);
  211. method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC;
  212. } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
  213. hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
  214. qedf->data_src_addr[0] = fc_map[0];
  215. qedf->data_src_addr[1] = fc_map[1];
  216. qedf->data_src_addr[2] = fc_map[2];
  217. qedf->data_src_addr[3] = fh->fh_d_id[0];
  218. qedf->data_src_addr[4] = fh->fh_d_id[1];
  219. qedf->data_src_addr[5] = fh->fh_d_id[2];
  220. method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
  221. } else {
  222. fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
  223. method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
  224. }
  225. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  226. "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
  227. }
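/*
 * FLOGI response handler: record accept/reject statistics, capture the
 * granted MAC for FCoE traffic on an LS_ACC, then pass the frame on to
 * libfc via fc_lport_flogi_resp().
 */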
  228. static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
  229. void *arg)
  230. {
  231. struct fc_exch *exch = fc_seq_exch(seq);
  232. struct fc_lport *lport = exch->lp;
  233. struct qedf_ctx *qedf = lport_priv(lport);
  234. if (!qedf) {
  235. QEDF_ERR(NULL, "qedf is NULL.\n");
  236. return;
  237. }
  238. /*
  239. * If ERR_PTR is set then don't try to stat anything as it will cause
  240. * a crash when we access fp.
  241. */
  242. if (IS_ERR(fp)) {
  243. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
  244. "fp has IS_ERR() set.\n");
  245. goto skip_stat;
  246. }
  247. /* Log stats for FLOGI reject */
  248. if (fc_frame_payload_op(fp) == ELS_LS_RJT)
  249. qedf->flogi_failed++;
  250. else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
  251. /* Set the source MAC we will use for FCoE traffic */
  252. qedf_set_data_src_addr(qedf, fp);
  253. qedf->flogi_pending = 0;
  254. }
  255. /* Complete flogi_compl so we can proceed to sending ADISCs */
  256. complete(&qedf->flogi_compl);
  257. skip_stat:
  258. /* Report response to libfc */
  259. fc_lport_flogi_resp(seq, fp, lport);
  260. }
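/*
 * ELS/CT send hook that wraps fc_elsct_send() so FLOGI requests can be
 * counted; once too many FLOGIs are pending, the stag_work delayed work is
 * scheduled instead of sending another request.
 */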
  261. static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
  262. struct fc_frame *fp, unsigned int op,
  263. void (*resp)(struct fc_seq *,
  264. struct fc_frame *,
  265. void *),
  266. void *arg, u32 timeout)
  267. {
  268. struct qedf_ctx *qedf = lport_priv(lport);
  269. /*
  270. * Intercept FLOGI for statistic purposes. Note we use the resp
  271. * callback to tell if this is really a flogi.
  272. */
  273. if (resp == fc_lport_flogi_resp) {
  274. qedf->flogi_cnt++;
  275. qedf->flogi_pending++;
  276. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  277. QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
  278. qedf->flogi_pending = 0;
  279. }
  280. if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
  281. schedule_delayed_work(&qedf->stag_work, 2);
  282. return NULL;
  283. }
  284. return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
  285. arg, timeout);
  286. }
  287. return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
  288. }
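/*
 * Send a FLOGI to re-establish the session with the switch (used by the
 * link recovery path) and prime flogi_compl so callers can wait for the
 * response.
 */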
  289. int qedf_send_flogi(struct qedf_ctx *qedf)
  290. {
  291. struct fc_lport *lport;
  292. struct fc_frame *fp;
  293. lport = qedf->lport;
  294. if (!lport->tt.elsct_send) {
  295. QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
  296. return -EINVAL;
  297. }
  298. fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
  299. if (!fp) {
  300. QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
  301. return -ENOMEM;
  302. }
  303. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
  304. "Sending FLOGI to reestablish session with switch.\n");
  305. lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
  306. ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
  307. init_completion(&qedf->flogi_compl);
  308. return 0;
  309. }
  310. /*
  311. * This function is called if link_down_tmo is in use. If we get a link up and
  312. * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
  313. * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
  314. */
  315. static void qedf_link_recovery(struct work_struct *work)
  316. {
  317. struct qedf_ctx *qedf =
  318. container_of(work, struct qedf_ctx, link_recovery.work);
  319. struct fc_lport *lport = qedf->lport;
  320. struct fc_rport_priv *rdata;
  321. bool rc;
  322. int retries = 30;
  323. int rval, i;
  324. struct list_head rdata_login_list;
  325. INIT_LIST_HEAD(&rdata_login_list);
  326. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  327. "Link down tmo did not expire.\n");
  328. /*
  329. * Essentially reset the fcoe_ctlr here without affecting the state
  330. * of the libfc structs.
  331. */
  332. qedf->ctlr.state = FIP_ST_LINK_WAIT;
  333. fcoe_ctlr_link_down(&qedf->ctlr);
  334. /*
  335. * Bring the link up before we send the fipvlan request so libfcoe
  336. * can select a new fcf in parallel
  337. */
  338. fcoe_ctlr_link_up(&qedf->ctlr);
  339. /* Since the link went down and came back up, verify which VLAN we're on */
  340. qedf->fipvlan_retries = qedf_fipvlan_retries;
  341. rc = qedf_initiate_fipvlan_req(qedf);
  342. /* If getting the VLAN fails, set the VLAN to the fallback one */
  343. if (!rc)
  344. qedf_set_vlan_id(qedf, qedf_fallback_vlan);
  345. /*
  346. * We need to wait for an FCF to be selected after the
  347. * fcoe_ctlr_link_up call, otherwise the FLOGI will be rejected.
  348. */
  349. while (retries > 0) {
  350. if (qedf->ctlr.sel_fcf) {
  351. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  352. "FCF reselected, proceeding with FLOGI.\n");
  353. break;
  354. }
  355. msleep(500);
  356. retries--;
  357. }
  358. if (retries < 1) {
  359. QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
  360. "FCF selection.\n");
  361. return;
  362. }
  363. rval = qedf_send_flogi(qedf);
  364. if (rval)
  365. return;
  366. /* Wait for FLOGI completion before proceeding with sending ADISCs */
  367. i = wait_for_completion_timeout(&qedf->flogi_compl,
  368. qedf->lport->r_a_tov);
  369. if (i == 0) {
  370. QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
  371. return;
  372. }
  373. /*
  374. * Call lport->tt.rport_login which will cause libfc to send an
  375. * ADISC since the rport is in state ready.
  376. */
  377. mutex_lock(&lport->disc.disc_mutex);
  378. list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
  379. if (kref_get_unless_zero(&rdata->kref)) {
  380. fc_rport_login(rdata);
  381. kref_put(&rdata->kref, fc_rport_destroy);
  382. }
  383. }
  384. mutex_unlock(&lport->disc.disc_mutex);
  385. }
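/*
 * Translate the qed link speed and supported-capability mask into the FC
 * transport's link_speed and link_supported_speeds values.
 */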
  386. static void qedf_update_link_speed(struct qedf_ctx *qedf,
  387. struct qed_link_output *link)
  388. {
  389. __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
  390. struct fc_lport *lport = qedf->lport;
  391. lport->link_speed = FC_PORTSPEED_UNKNOWN;
  392. lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
  393. /* Set fc_host link speed */
  394. switch (link->speed) {
  395. case 10000:
  396. lport->link_speed = FC_PORTSPEED_10GBIT;
  397. break;
  398. case 25000:
  399. lport->link_speed = FC_PORTSPEED_25GBIT;
  400. break;
  401. case 40000:
  402. lport->link_speed = FC_PORTSPEED_40GBIT;
  403. break;
  404. case 50000:
  405. lport->link_speed = FC_PORTSPEED_50GBIT;
  406. break;
  407. case 100000:
  408. lport->link_speed = FC_PORTSPEED_100GBIT;
  409. break;
  410. case 20000:
  411. lport->link_speed = FC_PORTSPEED_20GBIT;
  412. break;
  413. default:
  414. lport->link_speed = FC_PORTSPEED_UNKNOWN;
  415. break;
  416. }
  417. /*
  418. * Set supported link speed by querying the supported
  419. * capabilities of the link.
  420. */
  421. phylink_zero(sup_caps);
  422. phylink_set(sup_caps, 10000baseT_Full);
  423. phylink_set(sup_caps, 10000baseKX4_Full);
  424. phylink_set(sup_caps, 10000baseR_FEC);
  425. phylink_set(sup_caps, 10000baseCR_Full);
  426. phylink_set(sup_caps, 10000baseSR_Full);
  427. phylink_set(sup_caps, 10000baseLR_Full);
  428. phylink_set(sup_caps, 10000baseLRM_Full);
  429. phylink_set(sup_caps, 10000baseKR_Full);
  430. if (linkmode_intersects(link->supported_caps, sup_caps))
  431. lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
  432. phylink_zero(sup_caps);
  433. phylink_set(sup_caps, 25000baseKR_Full);
  434. phylink_set(sup_caps, 25000baseCR_Full);
  435. phylink_set(sup_caps, 25000baseSR_Full);
  436. if (linkmode_intersects(link->supported_caps, sup_caps))
  437. lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
  438. phylink_zero(sup_caps);
  439. phylink_set(sup_caps, 40000baseLR4_Full);
  440. phylink_set(sup_caps, 40000baseKR4_Full);
  441. phylink_set(sup_caps, 40000baseCR4_Full);
  442. phylink_set(sup_caps, 40000baseSR4_Full);
  443. if (linkmode_intersects(link->supported_caps, sup_caps))
  444. lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
  445. phylink_zero(sup_caps);
  446. phylink_set(sup_caps, 50000baseKR2_Full);
  447. phylink_set(sup_caps, 50000baseCR2_Full);
  448. phylink_set(sup_caps, 50000baseSR2_Full);
  449. if (linkmode_intersects(link->supported_caps, sup_caps))
  450. lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
  451. phylink_zero(sup_caps);
  452. phylink_set(sup_caps, 100000baseKR4_Full);
  453. phylink_set(sup_caps, 100000baseSR4_Full);
  454. phylink_set(sup_caps, 100000baseCR4_Full);
  455. phylink_set(sup_caps, 100000baseLR4_ER4_Full);
  456. if (linkmode_intersects(link->supported_caps, sup_caps))
  457. lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
  458. phylink_zero(sup_caps);
  459. phylink_set(sup_caps, 20000baseKR2_Full);
  460. if (linkmode_intersects(link->supported_caps, sup_caps))
  461. lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
  462. if (lport->host && lport->host->shost_data)
  463. fc_host_supported_speeds(lport->host) =
  464. lport->link_supported_speeds;
  465. }
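/*
 * qed bandwidth-update callback: re-read the link state and refresh the
 * reported link speed, provided the driver is not unloading and the link is
 * still up.
 */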
  466. static void qedf_bw_update(void *dev)
  467. {
  468. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  469. struct qed_link_output link;
  470. /* Get the latest status of the link */
  471. qed_ops->common->get_link(qedf->cdev, &link);
  472. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  473. QEDF_ERR(&qedf->dbg_ctx,
  474. "Ignoring link update, driver is unloading.\n");
  475. return;
  476. }
  477. if (link.link_up) {
  478. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
  479. qedf_update_link_speed(qedf, &link);
  480. else
  481. QEDF_ERR(&qedf->dbg_ctx,
  482. "Ignore bw update, link is down.\n");
  483. } else {
  484. QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
  485. }
  486. }
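/*
 * qed link-change callback. On link up, wait for DCBX convergence (unless
 * dcbx_no_wait is set) before queueing the link_update/link_recovery work;
 * on link down, optionally arm link_down_tmo and queue the delayed
 * link_update work.
 */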
  487. static void qedf_link_update(void *dev, struct qed_link_output *link)
  488. {
  489. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  490. /*
  491. * Prevent race where we're removing the module and we get link update
  492. * for qed.
  493. */
  494. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  495. QEDF_ERR(&qedf->dbg_ctx,
  496. "Ignoring link update, driver is unloading.\n");
  497. return;
  498. }
  499. if (link->link_up) {
  500. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
  501. QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
  502. "Ignoring link up event as link is already up.\n");
  503. return;
  504. }
  505. QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
  506. link->speed / 1000);
  507. /* Cancel any pending link down work */
  508. cancel_delayed_work(&qedf->link_update);
  509. atomic_set(&qedf->link_state, QEDF_LINK_UP);
  510. qedf_update_link_speed(qedf, link);
  511. if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
  512. qedf_dcbx_no_wait) {
  513. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  514. "DCBx done.\n");
  515. if (atomic_read(&qedf->link_down_tmo_valid) > 0)
  516. queue_delayed_work(qedf->link_update_wq,
  517. &qedf->link_recovery, 0);
  518. else
  519. queue_delayed_work(qedf->link_update_wq,
  520. &qedf->link_update, 0);
  521. atomic_set(&qedf->link_down_tmo_valid, 0);
  522. }
  523. } else {
  524. QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
  525. atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  526. atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
  527. /*
  528. * Flag that we're waiting for the link to come back up before
  529. * informing the fcoe layer of the event.
  530. */
  531. if (qedf_link_down_tmo > 0) {
  532. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  533. "Starting link down tmo.\n");
  534. atomic_set(&qedf->link_down_tmo_valid, 1);
  535. }
  536. qedf->vlan_id = 0;
  537. qedf_update_link_speed(qedf, link);
  538. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  539. qedf_link_down_tmo * HZ);
  540. }
  541. }
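/*
 * qed DCBX AEN callback: record the negotiated FCoE 802.1p priority (or the
 * module parameter/default override) and, if the link is already up, kick
 * the link update/recovery work now that DCBX has converged.
 */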
  542. static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
  543. {
  544. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  545. u8 tmp_prio;
  546. QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
  547. "prio=%d.\n", get->operational.valid, get->operational.enabled,
  548. get->operational.app_prio.fcoe);
  549. if (get->operational.enabled && get->operational.valid) {
  550. /* If DCBX was already negotiated on link up then just exit */
  551. if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
  552. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  553. "DCBX already set on link up.\n");
  554. return;
  555. }
  556. atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
  557. /*
  558. * Set the 8021q priority in the following manner:
  559. *
  560. * 1. If a modparam is set use that
  561. * 2. If the value is not between 0..7 use the default
  562. * 3. Use the priority we get from the DCBX app tag
  563. */
  564. tmp_prio = get->operational.app_prio.fcoe;
  565. if (qedf_default_prio > -1)
  566. qedf->prio = qedf_default_prio;
  567. else if (tmp_prio > 7) {
  568. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  569. "FIP/FCoE prio %d out of range, setting to %d.\n",
  570. tmp_prio, QEDF_DEFAULT_PRIO);
  571. qedf->prio = QEDF_DEFAULT_PRIO;
  572. } else
  573. qedf->prio = tmp_prio;
  574. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
  575. !qedf_dcbx_no_wait) {
  576. if (atomic_read(&qedf->link_down_tmo_valid) > 0)
  577. queue_delayed_work(qedf->link_update_wq,
  578. &qedf->link_recovery, 0);
  579. else
  580. queue_delayed_work(qedf->link_update_wq,
  581. &qedf->link_update, 0);
  582. atomic_set(&qedf->link_down_tmo_valid, 0);
  583. }
  584. }
  585. }
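/* Return the number of failed FLOGIs recorded for this qedf instance. */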
  586. static u32 qedf_get_login_failures(void *cookie)
  587. {
  588. struct qedf_ctx *qedf;
  589. qedf = (struct qedf_ctx *)cookie;
  590. return qedf->flogi_failed;
  591. }
  592. static struct qed_fcoe_cb_ops qedf_cb_ops = {
  593. {
  594. .link_update = qedf_link_update,
  595. .bw_update = qedf_bw_update,
  596. .schedule_recovery_handler = qedf_schedule_recovery_handler,
  597. .dcbx_aen = qedf_dcbx_handler,
  598. .get_generic_tlv_data = qedf_get_generic_tlv_data,
  599. .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
  600. .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
  601. }
  602. };
  603. /*
  604. * Various transport templates.
  605. */
  606. static struct scsi_transport_template *qedf_fc_transport_template;
  607. static struct scsi_transport_template *qedf_fc_vport_transport_template;
  608. /*
  609. * SCSI EH handlers
  610. */
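/*
 * SCSI abort handler: validate the rport and io_req, then issue an ABTS and
 * wait for its completion. Returns SUCCESS once all driver and firmware
 * references to the command have been released.
 */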
  611. static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
  612. {
  613. struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
  614. struct fc_lport *lport;
  615. struct qedf_ctx *qedf;
  616. struct qedf_ioreq *io_req;
  617. struct fc_rport_libfc_priv *rp = rport->dd_data;
  618. struct fc_rport_priv *rdata;
  619. struct qedf_rport *fcport = NULL;
  620. int rc = FAILED;
  621. int wait_count = 100;
  622. int refcount = 0;
  623. int rval;
  624. int got_ref = 0;
  625. lport = shost_priv(sc_cmd->device->host);
  626. qedf = (struct qedf_ctx *)lport_priv(lport);
  627. /* rport and fcport are allocated together, so fcport should be non-NULL */
  628. fcport = (struct qedf_rport *)&rp[1];
  629. rdata = fcport->rdata;
  630. if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
  631. QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
  632. rc = SUCCESS;
  633. goto out;
  634. }
  635. io_req = qedf_priv(sc_cmd)->io_req;
  636. if (!io_req) {
  637. QEDF_ERR(&qedf->dbg_ctx,
  638. "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
  639. sc_cmd, sc_cmd->cmnd[0],
  640. rdata->ids.port_id);
  641. rc = SUCCESS;
  642. goto drop_rdata_kref;
  643. }
  644. rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
  645. if (rval)
  646. got_ref = 1;
  647. /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
  648. if (!rval || io_req->sc_cmd != sc_cmd) {
  649. QEDF_ERR(&qedf->dbg_ctx,
  650. "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
  651. io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
  652. goto drop_rdata_kref;
  653. }
  654. if (fc_remote_port_chkready(rport)) {
  655. refcount = kref_read(&io_req->refcount);
  656. QEDF_ERR(&qedf->dbg_ctx,
  657. "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
  658. io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
  659. refcount, rdata->ids.port_id);
  660. goto drop_rdata_kref;
  661. }
  662. rc = fc_block_rport(rport);
  663. if (rc)
  664. goto drop_rdata_kref;
  665. if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
  666. QEDF_ERR(&qedf->dbg_ctx,
  667. "Connection uploading, xid=0x%x., port_id=%06x\n",
  668. io_req->xid, rdata->ids.port_id);
  669. while (io_req->sc_cmd && (wait_count != 0)) {
  670. msleep(100);
  671. wait_count--;
  672. }
  673. if (wait_count) {
  674. QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
  675. rc = SUCCESS;
  676. } else {
  677. QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
  678. rc = FAILED;
  679. }
  680. goto drop_rdata_kref;
  681. }
  682. if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
  683. QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
  684. goto drop_rdata_kref;
  685. }
  686. QEDF_ERR(&qedf->dbg_ctx,
  687. "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
  688. io_req, sc_cmd, io_req->xid, io_req->fp_idx,
  689. rdata->ids.port_id);
  690. if (qedf->stop_io_on_error) {
  691. qedf_stop_all_io(qedf);
  692. rc = SUCCESS;
  693. goto drop_rdata_kref;
  694. }
  695. init_completion(&io_req->abts_done);
  696. rval = qedf_initiate_abts(io_req, true);
  697. if (rval) {
  698. QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
  699. /*
  700. * If we fail to queue the ABTS then return this command to
  701. * the SCSI layer as it will own and free the xid
  702. */
  703. rc = SUCCESS;
  704. qedf_scsi_done(qedf, io_req, DID_ERROR);
  705. goto drop_rdata_kref;
  706. }
  707. wait_for_completion(&io_req->abts_done);
  708. if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
  709. io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
  710. io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
  711. /*
  712. * If we get a response to the abort, this is success from
  713. * the perspective that all references to the command have
  714. * been removed from the driver and firmware
  715. */
  716. rc = SUCCESS;
  717. } else {
  718. /* If the abort and cleanup failed then return a failure */
  719. rc = FAILED;
  720. }
  721. if (rc == SUCCESS)
  722. QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
  723. io_req->xid);
  724. else
  725. QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
  726. io_req->xid);
  727. drop_rdata_kref:
  728. kref_put(&rdata->kref, fc_rport_destroy);
  729. out:
  730. if (got_ref)
  731. kref_put(&io_req->refcount, qedf_release_cmd);
  732. return rc;
  733. }
  734. static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
  735. {
  736. struct scsi_target *starget = scsi_target(sc_cmd->device);
  737. struct fc_rport *rport = starget_to_rport(starget);
  738. QEDF_ERR(NULL, "TARGET RESET Issued...");
  739. return qedf_initiate_tmf(rport, 0, FCP_TMF_TGT_RESET);
  740. }
  741. static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
  742. {
  743. struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
  744. QEDF_ERR(NULL, "LUN RESET Issued...\n");
  745. return qedf_initiate_tmf(rport, sc_cmd->device->lun, FCP_TMF_LUN_RESET);
  746. }
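/*
 * Poll (up to roughly 60 seconds) for all offloaded sessions to finish
 * uploading; returns true when num_offloads reaches zero, false on timeout.
 */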
  747. bool qedf_wait_for_upload(struct qedf_ctx *qedf)
  748. {
  749. struct qedf_rport *fcport;
  750. int wait_cnt = 120;
  751. while (wait_cnt--) {
  752. if (atomic_read(&qedf->num_offloads))
  753. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  754. "Waiting for all uploads to complete num_offloads = 0x%x.\n",
  755. atomic_read(&qedf->num_offloads));
  756. else
  757. return true;
  758. msleep(500);
  759. }
  760. rcu_read_lock();
  761. list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  762. if (test_bit(QEDF_RPORT_SESSION_READY,
  763. &fcport->flags)) {
  764. if (fcport->rdata)
  765. QEDF_ERR(&qedf->dbg_ctx,
  766. "Waiting for fcport %p portid=%06x.\n",
  767. fcport, fcport->rdata->ids.port_id);
  768. } else {
  769. QEDF_ERR(&qedf->dbg_ctx,
  770. "Waiting for fcport %p.\n", fcport);
  771. }
  772. }
  773. rcu_read_unlock();
  774. return false;
  775. }
  776. /* Performs soft reset of qedf_ctx by simulating a link down/up */
  777. void qedf_ctx_soft_reset(struct fc_lport *lport)
  778. {
  779. struct qedf_ctx *qedf;
  780. struct qed_link_output if_link;
  781. qedf = lport_priv(lport);
  782. if (lport->vport) {
  783. clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
  784. printk_ratelimited("Cannot issue host reset on NPIV port.\n");
  785. return;
  786. }
  787. qedf->flogi_pending = 0;
  788. /* For host reset, essentially do a soft link up/down */
  789. atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  790. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  791. "Queuing link down work.\n");
  792. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  793. 0);
  794. if (qedf_wait_for_upload(qedf) == false) {
  795. QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
  796. WARN_ON(atomic_read(&qedf->num_offloads));
  797. }
  798. /* Before setting link up query physical link state */
  799. qed_ops->common->get_link(qedf->cdev, &if_link);
  800. /* Bail if the physical link is not up */
  801. if (!if_link.link_up) {
  802. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  803. "Physical link is not up.\n");
  804. clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
  805. return;
  806. }
  807. /* Flush and wait to make sure link down is processed */
  808. flush_delayed_work(&qedf->link_update);
  809. msleep(500);
  810. atomic_set(&qedf->link_state, QEDF_LINK_UP);
  811. qedf->vlan_id = 0;
  812. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  813. "Queue link up work.\n");
  814. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  815. 0);
  816. clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
  817. }
  818. /* Reset the host by gracefully logging out and then logging back in */
  819. static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
  820. {
  821. struct fc_lport *lport;
  822. struct qedf_ctx *qedf;
  823. lport = shost_priv(sc_cmd->device->host);
  824. qedf = lport_priv(lport);
  825. if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
  826. test_bit(QEDF_UNLOADING, &qedf->flags))
  827. return FAILED;
  828. QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
  829. qedf_ctx_soft_reset(lport);
  830. return SUCCESS;
  831. }
  832. static int qedf_slave_configure(struct scsi_device *sdev)
  833. {
  834. if (qedf_queue_depth) {
  835. scsi_change_queue_depth(sdev, qedf_queue_depth);
  836. }
  837. return 0;
  838. }
  839. static const struct scsi_host_template qedf_host_template = {
  840. .module = THIS_MODULE,
  841. .name = QEDF_MODULE_NAME,
  842. .this_id = -1,
  843. .cmd_per_lun = 32,
  844. .max_sectors = 0xffff,
  845. .queuecommand = qedf_queuecommand,
  846. .shost_groups = qedf_host_groups,
  847. .eh_abort_handler = qedf_eh_abort,
  848. .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
  849. .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
  850. .eh_host_reset_handler = qedf_eh_host_reset,
  851. .slave_configure = qedf_slave_configure,
  852. .dma_boundary = QED_HW_DMA_BOUNDARY,
  853. .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
  854. .can_queue = FCOE_PARAMS_NUM_TASKS,
  855. .change_queue_depth = scsi_change_queue_depth,
  856. .cmd_size = sizeof(struct qedf_cmd_priv),
  857. };
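/* Append the paged CRC/EOF trailer via libfcoe, serialized by qedf_global_lock. */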
  858. static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  859. {
  860. int rc;
  861. spin_lock(&qedf_global_lock);
  862. rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
  863. spin_unlock(&qedf_global_lock);
  864. return rc;
  865. }
  866. static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
  867. {
  868. struct qedf_rport *fcport;
  869. struct fc_rport_priv *rdata;
  870. rcu_read_lock();
  871. list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  872. rdata = fcport->rdata;
  873. if (rdata == NULL)
  874. continue;
  875. if (rdata->ids.port_id == port_id) {
  876. rcu_read_unlock();
  877. return fcport;
  878. }
  879. }
  880. rcu_read_unlock();
  881. /* Return NULL to caller to let them know fcport was not found */
  882. return NULL;
  883. }
  884. /* Transmits an ELS frame over an offloaded session */
  885. static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
  886. {
  887. struct fc_frame_header *fh;
  888. int rc = 0;
  889. fh = fc_frame_header_get(fp);
  890. if ((fh->fh_type == FC_TYPE_ELS) &&
  891. (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  892. switch (fc_frame_payload_op(fp)) {
  893. case ELS_ADISC:
  894. qedf_send_adisc(fcport, fp);
  895. rc = 1;
  896. break;
  897. }
  898. }
  899. return rc;
  900. }
  901. /*
  902. * qedf_xmit - qedf FCoE frame transmit function
  903. */
  904. static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
  905. {
  906. struct fc_lport *base_lport;
  907. struct qedf_ctx *qedf;
  908. struct ethhdr *eh;
  909. struct fcoe_crc_eof *cp;
  910. struct sk_buff *skb;
  911. struct fc_frame_header *fh;
  912. struct fcoe_hdr *hp;
  913. u8 sof, eof;
  914. u32 crc;
  915. unsigned int hlen, tlen, elen;
  916. int wlen;
  917. struct fc_lport *tmp_lport;
  918. struct fc_lport *vn_port = NULL;
  919. struct qedf_rport *fcport;
  920. int rc;
  921. u16 vlan_tci = 0;
  922. qedf = (struct qedf_ctx *)lport_priv(lport);
  923. fh = fc_frame_header_get(fp);
  924. skb = fp_skb(fp);
  925. /* Filter out traffic to other NPIV ports on the same host */
  926. if (lport->vport)
  927. base_lport = shost_priv(vport_to_shost(lport->vport));
  928. else
  929. base_lport = lport;
  930. /* Flag if the destination is the base port */
  931. if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
  932. vn_port = base_lport;
  933. } else {
  934. /* Go through the list of vports attached to the base_lport
  935. * and see if we have a match with the destination address.
  936. */
  937. list_for_each_entry(tmp_lport, &base_lport->vports, list) {
  938. if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
  939. vn_port = tmp_lport;
  940. break;
  941. }
  942. }
  943. }
  944. if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
  945. struct fc_rport_priv *rdata = NULL;
  946. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  947. "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
  948. kfree_skb(skb);
  949. rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
  950. if (rdata) {
  951. rdata->retries = lport->max_rport_retry_count;
  952. kref_put(&rdata->kref, fc_rport_destroy);
  953. }
  954. return -EINVAL;
  955. }
  956. /* End NPIV filtering */
  957. if (!qedf->ctlr.sel_fcf) {
  958. kfree_skb(skb);
  959. return 0;
  960. }
  961. if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
  962. QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
  963. kfree_skb(skb);
  964. return 0;
  965. }
  966. if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
  967. QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
  968. kfree_skb(skb);
  969. return 0;
  970. }
  971. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  972. if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
  973. return 0;
  974. }
  975. /* Check to see if this needs to be sent on an offloaded session */
  976. fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
  977. if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  978. rc = qedf_xmit_l2_frame(fcport, fp);
  979. /*
  980. * If the frame was successfully sent over the middle path
  981. * then do not try to also send it over the LL2 path
  982. */
  983. if (rc)
  984. return 0;
  985. }
  986. sof = fr_sof(fp);
  987. eof = fr_eof(fp);
  988. elen = sizeof(struct ethhdr);
  989. hlen = sizeof(struct fcoe_hdr);
  990. tlen = sizeof(struct fcoe_crc_eof);
  991. wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
  992. skb->ip_summed = CHECKSUM_NONE;
  993. crc = fcoe_fc_crc(fp);
  994. /* copy port crc and eof to the skb buff */
  995. if (skb_is_nonlinear(skb)) {
  996. skb_frag_t *frag;
  997. if (qedf_get_paged_crc_eof(skb, tlen)) {
  998. kfree_skb(skb);
  999. return -ENOMEM;
  1000. }
  1001. frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  1002. cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
  1003. } else {
  1004. cp = skb_put(skb, tlen);
  1005. }
  1006. memset(cp, 0, sizeof(*cp));
  1007. cp->fcoe_eof = eof;
  1008. cp->fcoe_crc32 = cpu_to_le32(~crc);
  1009. if (skb_is_nonlinear(skb)) {
  1010. kunmap_atomic(cp);
  1011. cp = NULL;
  1012. }
  1013. /* adjust skb network/transport offsets to match mac/fcoe/port */
  1014. skb_push(skb, elen + hlen);
  1015. skb_reset_mac_header(skb);
  1016. skb_reset_network_header(skb);
  1017. skb->mac_len = elen;
  1018. skb->protocol = htons(ETH_P_FCOE);
  1019. /*
  1020. * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
  1021. * for FIP/FCoE traffic.
  1022. */
  1023. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
  1024. /* fill up mac and fcoe headers */
  1025. eh = eth_hdr(skb);
  1026. eh->h_proto = htons(ETH_P_FCOE);
  1027. if (qedf->ctlr.map_dest)
  1028. fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
  1029. else
  1030. /* insert GW address */
  1031. ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
  1032. /* Set the source MAC address */
  1033. ether_addr_copy(eh->h_source, qedf->data_src_addr);
  1034. hp = (struct fcoe_hdr *)(eh + 1);
  1035. memset(hp, 0, sizeof(*hp));
  1036. if (FC_FCOE_VER)
  1037. FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
  1038. hp->fcoe_sof = sof;
  1039. /* Update tx stats */
  1040. this_cpu_inc(lport->stats->TxFrames);
  1041. this_cpu_add(lport->stats->TxWords, wlen);
  1042. /* Get VLAN ID from skb for printing purposes */
  1043. __vlan_hwaccel_get_tag(skb, &vlan_tci);
  1044. /* send down to lld */
  1045. fr_dev(fp) = lport;
  1046. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
  1047. "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
  1048. ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
  1049. vlan_tci);
  1050. if (qedf_dump_frames)
  1051. print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
  1052. 1, skb->data, skb->len, false);
  1053. rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
  1054. if (rc) {
  1055. QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
  1056. kfree_skb(skb);
  1057. return rc;
  1058. }
  1059. return 0;
  1060. }
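/*
 * Allocate the per-rport send queue and its page base list (PBL) in
 * DMA-coherent memory and populate the PBL with the SQ page addresses.
 */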
  1061. static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
  1062. {
  1063. int rval = 0;
  1064. u32 *pbl;
  1065. dma_addr_t page;
  1066. int num_pages;
  1067. /* Calculate appropriate queue and PBL sizes */
  1068. fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
  1069. fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
  1070. fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
  1071. sizeof(void *);
  1072. fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
  1073. fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
  1074. &fcport->sq_dma, GFP_KERNEL);
  1075. if (!fcport->sq) {
  1076. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
  1077. rval = 1;
  1078. goto out;
  1079. }
  1080. fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
  1081. fcport->sq_pbl_size,
  1082. &fcport->sq_pbl_dma, GFP_KERNEL);
  1083. if (!fcport->sq_pbl) {
  1084. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
  1085. rval = 1;
  1086. goto out_free_sq;
  1087. }
  1088. /* Create PBL */
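/*
 * Each PBL entry is the 64-bit DMA address of one send queue page,
 * stored as two 32-bit words with the low word first.
 */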
  1089. num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
  1090. page = fcport->sq_dma;
  1091. pbl = (u32 *)fcport->sq_pbl;
  1092. while (num_pages--) {
  1093. *pbl = U64_LO(page);
  1094. pbl++;
  1095. *pbl = U64_HI(page);
  1096. pbl++;
  1097. page += QEDF_PAGE_SIZE;
  1098. }
  1099. return rval;
  1100. out_free_sq:
  1101. dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
  1102. fcport->sq_dma);
  1103. out:
  1104. return rval;
  1105. }
  1106. static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
  1107. {
  1108. if (fcport->sq_pbl)
  1109. dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
  1110. fcport->sq_pbl, fcport->sq_pbl_dma);
  1111. if (fcport->sq)
  1112. dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
  1113. fcport->sq, fcport->sq_dma);
  1114. }
  1115. static int qedf_offload_connection(struct qedf_ctx *qedf,
  1116. struct qedf_rport *fcport)
  1117. {
  1118. struct qed_fcoe_params_offload conn_info;
  1119. u32 port_id;
  1120. int rval;
  1121. uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
  1122. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
  1123. "portid=%06x.\n", fcport->rdata->ids.port_id);
  1124. rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
  1125. &fcport->fw_cid, &fcport->p_doorbell);
  1126. if (rval) {
  1127. QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
  1128. "for portid=%06x.\n", fcport->rdata->ids.port_id);
  1129. rval = 1; /* For some reason qed returns 0 on failure here */
  1130. goto out;
  1131. }
  1132. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
  1133. "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
  1134. fcport->fw_cid, fcport->handle);
  1135. memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));
  1136. /* Fill in the offload connection info */
  1137. conn_info.sq_pbl_addr = fcport->sq_pbl_dma;
  1138. conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
  1139. conn_info.sq_next_page_addr =
  1140. (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
  1141. /* Need to use our FCoE MAC for the offload session */
  1142. ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
  1143. ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
  1144. conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
  1145. conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
  1146. conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
  1147. conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
  1148. /* Set VLAN data */
  1149. conn_info.vlan_tag = qedf->vlan_id <<
  1150. FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
  1151. conn_info.vlan_tag |=
  1152. qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
  1153. conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
  1154. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
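/*
 * The tag above carries both the VLAN ID and the 802.1p priority; the
 * B_VLAN_FLAG bit marks the tag as valid for the firmware.
 */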
  1155. /* Set host port source id */
  1156. port_id = fc_host_port_id(qedf->lport->host);
  1157. fcport->sid = port_id;
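/*
 * The 24-bit FC address is split into its three bytes; note that
 * addr_hi takes the least significant byte and addr_lo the most
 * significant one.
 */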
  1158. conn_info.s_id.addr_hi = (port_id & 0x000000FF);
  1159. conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
  1160. conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
  1161. conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;
  1162. /* Set remote port destination id */
  1163. port_id = fcport->rdata->rport->port_id;
  1164. conn_info.d_id.addr_hi = (port_id & 0x000000FF);
  1165. conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
  1166. conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;
  1167. conn_info.def_q_idx = 0; /* Default index for send queue? */
  1168. /* Set FC-TAPE specific flags if needed */
  1169. if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
  1170. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
  1171. "Enable CONF, REC for portid=%06x.\n",
  1172. fcport->rdata->ids.port_id);
  1173. conn_info.flags |= 1 <<
  1174. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
  1175. conn_info.flags |=
  1176. ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
  1177. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
  1178. }
  1179. rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
  1180. if (rval) {
  1181. QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
  1182. "for portid=%06x.\n", fcport->rdata->ids.port_id);
  1183. goto out_free_conn;
  1184. } else
  1185. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
  1186. "succeeded portid=%06x total_sqe=%d.\n",
  1187. fcport->rdata->ids.port_id, total_sqe);
  1188. spin_lock_init(&fcport->rport_lock);
  1189. atomic_set(&fcport->free_sqes, total_sqe);
  1190. return 0;
  1191. out_free_conn:
  1192. qed_ops->release_conn(qedf->cdev, fcport->handle);
  1193. out:
  1194. return rval;
  1195. }
  1196. #define QEDF_TERM_BUFF_SIZE 10
  1197. static void qedf_upload_connection(struct qedf_ctx *qedf,
  1198. struct qedf_rport *fcport)
  1199. {
  1200. void *term_params;
  1201. dma_addr_t term_params_dma;
1202. /* Term params need to be a DMA-coherent buffer as qed shares the
1203. * physical DMA address with the firmware. The buffer may be used in
1204. * the receive path, so we may eventually have to move this.
1205. */
  1206. term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
  1207. &term_params_dma, GFP_KERNEL);
  1208. if (!term_params)
  1209. return;
  1210. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
  1211. "port_id=%06x.\n", fcport->rdata->ids.port_id);
  1212. qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
  1213. qed_ops->release_conn(qedf->cdev, fcport->handle);
  1214. dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
  1215. term_params_dma);
  1216. }
  1217. static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
  1218. struct qedf_rport *fcport)
  1219. {
  1220. struct fc_rport_priv *rdata = fcport->rdata;
  1221. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
  1222. fcport->rdata->ids.port_id);
1223. /* Flush any remaining I/Os before we upload the connection */
  1224. qedf_flush_active_ios(fcport, -1);
  1225. if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
  1226. qedf_upload_connection(qedf, fcport);
  1227. qedf_free_sq(qedf, fcport);
  1228. fcport->rdata = NULL;
  1229. fcport->qedf = NULL;
  1230. kref_put(&rdata->kref, fc_rport_destroy);
  1231. }
  1232. /*
  1233. * This event_callback is called after successful completion of libfc
  1234. * initiated target login. qedf can proceed with initiating the session
  1235. * establishment.
  1236. */
  1237. static void qedf_rport_event_handler(struct fc_lport *lport,
  1238. struct fc_rport_priv *rdata,
  1239. enum fc_rport_event event)
  1240. {
  1241. struct qedf_ctx *qedf = lport_priv(lport);
  1242. struct fc_rport *rport = rdata->rport;
  1243. struct fc_rport_libfc_priv *rp;
  1244. struct qedf_rport *fcport;
  1245. u32 port_id;
  1246. int rval;
  1247. unsigned long flags;
  1248. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
  1249. "port_id = 0x%x\n", event, rdata->ids.port_id);
  1250. switch (event) {
  1251. case RPORT_EV_READY:
  1252. if (!rport) {
  1253. QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
  1254. break;
  1255. }
  1256. rp = rport->dd_data;
  1257. fcport = (struct qedf_rport *)&rp[1];
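/*
 * The qedf_rport structure lives in the rport dd_data area
 * immediately after fc_rport_libfc_priv (see dd_fcrport_size in the
 * fc_function_template definitions below).
 */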
  1258. fcport->qedf = qedf;
  1259. if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
  1260. QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
  1261. "portid=0x%x as max number of offloaded sessions "
  1262. "reached.\n", rdata->ids.port_id);
  1263. return;
  1264. }
  1265. /*
  1266. * Don't try to offload the session again. Can happen when we
  1267. * get an ADISC
  1268. */
  1269. if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  1270. QEDF_WARN(&(qedf->dbg_ctx), "Session already "
  1271. "offloaded, portid=0x%x.\n",
  1272. rdata->ids.port_id);
  1273. return;
  1274. }
  1275. if (rport->port_id == FC_FID_DIR_SERV) {
  1276. /*
  1277. * qedf_rport structure doesn't exist for
  1278. * directory server.
  1279. * We should not come here, as lport will
  1280. * take care of fabric login
  1281. */
  1282. QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
  1283. "exist for dir server port_id=%x\n",
  1284. rdata->ids.port_id);
  1285. break;
  1286. }
  1287. if (rdata->spp_type != FC_TYPE_FCP) {
  1288. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1289. "Not offloading since spp type isn't FCP\n");
  1290. break;
  1291. }
  1292. if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
  1293. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1294. "Not FCP target so not offloading\n");
  1295. break;
  1296. }
  1297. /* Initial reference held on entry, so this can't fail */
  1298. kref_get(&rdata->kref);
  1299. fcport->rdata = rdata;
  1300. fcport->rport = rport;
  1301. rval = qedf_alloc_sq(qedf, fcport);
  1302. if (rval) {
  1303. qedf_cleanup_fcport(qedf, fcport);
  1304. break;
  1305. }
  1306. /* Set device type */
  1307. if (rdata->flags & FC_RP_FLAGS_RETRY &&
  1308. rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
  1309. !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
  1310. fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
  1311. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1312. "portid=%06x is a TAPE device.\n",
  1313. rdata->ids.port_id);
  1314. } else {
  1315. fcport->dev_type = QEDF_RPORT_TYPE_DISK;
  1316. }
  1317. rval = qedf_offload_connection(qedf, fcport);
  1318. if (rval) {
  1319. qedf_cleanup_fcport(qedf, fcport);
  1320. break;
  1321. }
1322. /* Add fcport to the qedf_ctx list of offloaded ports */
  1323. spin_lock_irqsave(&qedf->hba_lock, flags);
  1324. list_add_rcu(&fcport->peers, &qedf->fcports);
  1325. spin_unlock_irqrestore(&qedf->hba_lock, flags);
  1326. /*
  1327. * Set the session ready bit to let everyone know that this
  1328. * connection is ready for I/O
  1329. */
  1330. set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
  1331. atomic_inc(&qedf->num_offloads);
  1332. break;
  1333. case RPORT_EV_LOGO:
  1334. case RPORT_EV_FAILED:
  1335. case RPORT_EV_STOP:
  1336. port_id = rdata->ids.port_id;
  1337. if (port_id == FC_FID_DIR_SERV)
  1338. break;
  1339. if (rdata->spp_type != FC_TYPE_FCP) {
  1340. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1341. "No action since spp type isn't FCP\n");
  1342. break;
  1343. }
  1344. if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
  1345. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1346. "Not FCP target so no action\n");
  1347. break;
  1348. }
  1349. if (!rport) {
  1350. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1351. "port_id=%x - rport notcreated Yet!!\n", port_id);
  1352. break;
  1353. }
  1354. rp = rport->dd_data;
  1355. /*
  1356. * Perform session upload. Note that rdata->peers is already
  1357. * removed from disc->rports list before we get this event.
  1358. */
  1359. fcport = (struct qedf_rport *)&rp[1];
  1360. spin_lock_irqsave(&fcport->rport_lock, flags);
  1361. /* Only free this fcport if it is offloaded already */
  1362. if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
  1363. !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1364. &fcport->flags)) {
  1365. set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1366. &fcport->flags);
  1367. spin_unlock_irqrestore(&fcport->rport_lock, flags);
  1368. qedf_cleanup_fcport(qedf, fcport);
  1369. /*
1370. * Remove fcport from the qedf_ctx list of offloaded
1371. * ports
  1372. */
  1373. spin_lock_irqsave(&qedf->hba_lock, flags);
  1374. list_del_rcu(&fcport->peers);
  1375. spin_unlock_irqrestore(&qedf->hba_lock, flags);
  1376. clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1377. &fcport->flags);
  1378. atomic_dec(&qedf->num_offloads);
  1379. } else {
  1380. spin_unlock_irqrestore(&fcport->rport_lock, flags);
  1381. }
  1382. break;
  1383. case RPORT_EV_NONE:
  1384. break;
  1385. }
  1386. }
  1387. static void qedf_abort_io(struct fc_lport *lport)
  1388. {
  1389. /* NO-OP but need to fill in the template */
  1390. }
  1391. static void qedf_fcp_cleanup(struct fc_lport *lport)
  1392. {
  1393. /*
  1394. * NO-OP but need to fill in template to prevent a NULL
  1395. * function pointer dereference during link down. I/Os
  1396. * will be flushed when port is uploaded.
  1397. */
  1398. }
  1399. static struct libfc_function_template qedf_lport_template = {
  1400. .frame_send = qedf_xmit,
  1401. .fcp_abort_io = qedf_abort_io,
  1402. .fcp_cleanup = qedf_fcp_cleanup,
  1403. .rport_event_callback = qedf_rport_event_handler,
  1404. .elsct_send = qedf_elsct_send,
  1405. };
  1406. static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
  1407. {
  1408. fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
  1409. qedf->ctlr.send = qedf_fip_send;
  1410. qedf->ctlr.get_src_addr = qedf_get_src_mac;
  1411. ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
  1412. }
  1413. static void qedf_setup_fdmi(struct qedf_ctx *qedf)
  1414. {
  1415. struct fc_lport *lport = qedf->lport;
  1416. u8 buf[8];
  1417. int pos;
  1418. uint32_t i;
  1419. /*
  1420. * fdmi_enabled needs to be set for libfc
  1421. * to execute FDMI registration
  1422. */
  1423. lport->fdmi_enabled = 1;
  1424. /*
1425. * Set up the necessary fc_host attributes that will be used to fill
  1426. * in the FDMI information.
  1427. */
  1428. /* Get the PCI-e Device Serial Number Capability */
  1429. pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
  1430. if (pos) {
  1431. pos += 4;
  1432. for (i = 0; i < 8; i++)
  1433. pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
  1434. snprintf(fc_host_serial_number(lport->host),
  1435. FC_SERIAL_NUMBER_SIZE,
  1436. "%02X%02X%02X%02X%02X%02X%02X%02X",
  1437. buf[7], buf[6], buf[5], buf[4],
  1438. buf[3], buf[2], buf[1], buf[0]);
  1439. } else
  1440. snprintf(fc_host_serial_number(lport->host),
  1441. FC_SERIAL_NUMBER_SIZE, "Unknown");
  1442. snprintf(fc_host_manufacturer(lport->host),
  1443. FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
  1444. if (qedf->pdev->device == QL45xxx) {
  1445. snprintf(fc_host_model(lport->host),
  1446. FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
  1447. snprintf(fc_host_model_description(lport->host),
  1448. FC_SYMBOLIC_NAME_SIZE, "%s",
  1449. "Marvell FastLinQ QL45xxx FCoE Adapter");
  1450. }
  1451. if (qedf->pdev->device == QL41xxx) {
  1452. snprintf(fc_host_model(lport->host),
  1453. FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
  1454. snprintf(fc_host_model_description(lport->host),
  1455. FC_SYMBOLIC_NAME_SIZE, "%s",
  1456. "Marvell FastLinQ QL41xxx FCoE Adapter");
  1457. }
  1458. snprintf(fc_host_hardware_version(lport->host),
  1459. FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
  1460. snprintf(fc_host_driver_version(lport->host),
  1461. FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
  1462. snprintf(fc_host_firmware_version(lport->host),
  1463. FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
  1464. FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
  1465. FW_ENGINEERING_VERSION);
  1466. snprintf(fc_host_vendor_identifier(lport->host),
  1467. FC_VENDOR_IDENTIFIER, "%s", "Marvell");
  1468. }
  1469. static int qedf_lport_setup(struct qedf_ctx *qedf)
  1470. {
  1471. struct fc_lport *lport = qedf->lport;
  1472. lport->link_up = 0;
  1473. lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
  1474. lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
  1475. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  1476. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  1477. lport->boot_time = jiffies;
  1478. lport->e_d_tov = 2 * 1000;
  1479. lport->r_a_tov = 10 * 1000;
  1480. /* Set NPIV support */
  1481. lport->does_npiv = 1;
  1482. fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
  1483. fc_set_wwnn(lport, qedf->wwnn);
  1484. fc_set_wwpn(lport, qedf->wwpn);
  1485. if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
  1486. QEDF_ERR(&qedf->dbg_ctx,
  1487. "fcoe_libfc_config failed.\n");
  1488. return -ENOMEM;
  1489. }
  1490. /* Allocate the exchange manager */
  1491. fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
  1492. 0xfffe, NULL);
  1493. if (fc_lport_init_stats(lport))
  1494. return -ENOMEM;
  1495. /* Finish lport config */
  1496. fc_lport_config(lport);
  1497. /* Set max frame size */
  1498. fc_set_mfs(lport, QEDF_MFS);
  1499. fc_host_maxframe_size(lport->host) = lport->mfs;
  1500. /* Set default dev_loss_tmo based on module parameter */
  1501. fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
  1502. /* Set symbolic node name */
  1503. if (qedf->pdev->device == QL45xxx)
  1504. snprintf(fc_host_symbolic_name(lport->host), 256,
  1505. "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
  1506. if (qedf->pdev->device == QL41xxx)
  1507. snprintf(fc_host_symbolic_name(lport->host), 256,
  1508. "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
  1509. qedf_setup_fdmi(qedf);
  1510. return 0;
  1511. }
  1512. /*
  1513. * NPIV functions
  1514. */
  1515. static int qedf_vport_libfc_config(struct fc_vport *vport,
  1516. struct fc_lport *lport)
  1517. {
  1518. lport->link_up = 0;
  1519. lport->qfull = 0;
  1520. lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
  1521. lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
  1522. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  1523. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  1524. lport->boot_time = jiffies;
  1525. lport->e_d_tov = 2 * 1000;
  1526. lport->r_a_tov = 10 * 1000;
  1527. lport->does_npiv = 1; /* Temporary until we add NPIV support */
  1528. /* Allocate stats for vport */
  1529. if (fc_lport_init_stats(lport))
  1530. return -ENOMEM;
  1531. /* Finish lport config */
  1532. fc_lport_config(lport);
  1533. /* offload related configuration */
  1534. lport->crc_offload = 0;
  1535. lport->seq_offload = 0;
  1536. lport->lro_enabled = 0;
  1537. lport->lro_xid = 0;
  1538. lport->lso_max = 0;
  1539. return 0;
  1540. }
  1541. static int qedf_vport_create(struct fc_vport *vport, bool disabled)
  1542. {
  1543. struct Scsi_Host *shost = vport_to_shost(vport);
  1544. struct fc_lport *n_port = shost_priv(shost);
  1545. struct fc_lport *vn_port;
  1546. struct qedf_ctx *base_qedf = lport_priv(n_port);
  1547. struct qedf_ctx *vport_qedf;
  1548. char buf[32];
  1549. int rc = 0;
  1550. rc = fcoe_validate_vport_create(vport);
  1551. if (rc) {
  1552. fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
  1553. QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
  1554. "WWPN (0x%s) already exists.\n", buf);
  1555. return rc;
  1556. }
  1557. if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
  1558. QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
  1559. "because link is not up.\n");
  1560. return -EIO;
  1561. }
  1562. vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
  1563. if (!vn_port) {
  1564. QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
  1565. "for vport.\n");
  1566. return -ENOMEM;
  1567. }
  1568. fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
  1569. QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
  1570. buf);
  1571. /* Copy some fields from base_qedf */
  1572. vport_qedf = lport_priv(vn_port);
  1573. memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
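/*
 * The vport context starts out as a byte copy of the base adapter
 * context; the fields that must differ per vport are overridden below.
 */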
  1574. /* Set qedf data specific to this vport */
  1575. vport_qedf->lport = vn_port;
  1576. /* Use same hba_lock as base_qedf */
  1577. vport_qedf->hba_lock = base_qedf->hba_lock;
  1578. vport_qedf->pdev = base_qedf->pdev;
  1579. vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
  1580. init_completion(&vport_qedf->flogi_compl);
  1581. INIT_LIST_HEAD(&vport_qedf->fcports);
  1582. INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
  1583. rc = qedf_vport_libfc_config(vport, vn_port);
  1584. if (rc) {
  1585. QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
  1586. "for lport stats.\n");
  1587. goto err;
  1588. }
  1589. fc_set_wwnn(vn_port, vport->node_name);
  1590. fc_set_wwpn(vn_port, vport->port_name);
  1591. vport_qedf->wwnn = vn_port->wwnn;
  1592. vport_qedf->wwpn = vn_port->wwpn;
  1593. vn_port->host->transportt = qedf_fc_vport_transport_template;
  1594. vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
  1595. vn_port->host->max_lun = qedf_max_lun;
  1596. vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
  1597. vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
  1598. vn_port->host->max_id = QEDF_MAX_SESSIONS;
  1599. rc = scsi_add_host(vn_port->host, &vport->dev);
  1600. if (rc) {
  1601. QEDF_WARN(&base_qedf->dbg_ctx,
  1602. "Error adding Scsi_Host rc=0x%x.\n", rc);
  1603. goto err;
  1604. }
  1605. /* Set default dev_loss_tmo based on module parameter */
  1606. fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1607. /* Initialize libfc internals */
  1608. memcpy(&vn_port->tt, &qedf_lport_template,
  1609. sizeof(qedf_lport_template));
  1610. fc_exch_init(vn_port);
  1611. fc_elsct_init(vn_port);
  1612. fc_lport_init(vn_port);
  1613. fc_disc_init(vn_port);
  1614. fc_disc_config(vn_port, vn_port);
  1615. /* Allocate the exchange manager */
  1616. shost = vport_to_shost(vport);
  1617. n_port = shost_priv(shost);
  1618. fc_exch_mgr_list_clone(n_port, vn_port);
  1619. /* Set max frame size */
  1620. fc_set_mfs(vn_port, QEDF_MFS);
  1621. fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
  1622. if (disabled) {
  1623. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1624. } else {
  1625. vn_port->boot_time = jiffies;
  1626. fc_fabric_login(vn_port);
  1627. fc_vport_setlink(vn_port);
  1628. }
  1629. /* Set symbolic node name */
  1630. if (base_qedf->pdev->device == QL45xxx)
  1631. snprintf(fc_host_symbolic_name(vn_port->host), 256,
  1632. "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
  1633. if (base_qedf->pdev->device == QL41xxx)
  1634. snprintf(fc_host_symbolic_name(vn_port->host), 256,
  1635. "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
  1636. /* Set supported speed */
  1637. fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
  1638. /* Set speed */
  1639. vn_port->link_speed = n_port->link_speed;
  1640. /* Set port type */
  1641. fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
  1642. /* Set maxframe size */
  1643. fc_host_maxframe_size(vn_port->host) = n_port->mfs;
  1644. QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
  1645. vn_port);
  1646. /* Set up debug context for vport */
  1647. vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
  1648. vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
  1649. return 0;
  1650. err:
  1651. scsi_host_put(vn_port->host);
  1652. return rc;
  1653. }
  1654. static int qedf_vport_destroy(struct fc_vport *vport)
  1655. {
  1656. struct Scsi_Host *shost = vport_to_shost(vport);
  1657. struct fc_lport *n_port = shost_priv(shost);
  1658. struct fc_lport *vn_port = vport->dd_data;
  1659. struct qedf_ctx *qedf = lport_priv(vn_port);
  1660. if (!qedf) {
  1661. QEDF_ERR(NULL, "qedf is NULL.\n");
  1662. goto out;
  1663. }
  1664. /* Set unloading bit on vport qedf_ctx to prevent more I/O */
  1665. set_bit(QEDF_UNLOADING, &qedf->flags);
  1666. mutex_lock(&n_port->lp_mutex);
  1667. list_del(&vn_port->list);
  1668. mutex_unlock(&n_port->lp_mutex);
  1669. fc_fabric_logoff(vn_port);
  1670. fc_lport_destroy(vn_port);
  1671. /* Detach from scsi-ml */
  1672. fc_remove_host(vn_port->host);
  1673. scsi_remove_host(vn_port->host);
  1674. /*
  1675. * Only try to release the exchange manager if the vn_port
  1676. * configuration is complete.
  1677. */
  1678. if (vn_port->state == LPORT_ST_READY)
  1679. fc_exch_mgr_free(vn_port);
  1680. /* Free memory used by statistical counters */
  1681. fc_lport_free_stats(vn_port);
  1682. /* Release Scsi_Host */
  1683. scsi_host_put(vn_port->host);
  1684. out:
  1685. return 0;
  1686. }
  1687. static int qedf_vport_disable(struct fc_vport *vport, bool disable)
  1688. {
  1689. struct fc_lport *lport = vport->dd_data;
  1690. if (disable) {
  1691. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1692. fc_fabric_logoff(lport);
  1693. } else {
  1694. lport->boot_time = jiffies;
  1695. fc_fabric_login(lport);
  1696. fc_vport_setlink(lport);
  1697. }
  1698. return 0;
  1699. }
  1700. /*
  1701. * During removal we need to wait for all the vports associated with a port
  1702. * to be destroyed so we avoid a race condition where libfc is still trying
  1703. * to reap vports while the driver remove function has already reaped the
  1704. * driver contexts associated with the physical port.
  1705. */
  1706. static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
  1707. {
  1708. struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
  1709. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
  1710. "Entered.\n");
  1711. while (fc_host->npiv_vports_inuse > 0) {
  1712. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
  1713. "Waiting for all vports to be reaped.\n");
  1714. msleep(1000);
  1715. }
  1716. }
  1717. /**
  1718. * qedf_fcoe_reset - Resets the fcoe
  1719. *
  1720. * @shost: shost the reset is from
  1721. *
  1722. * Returns: always 0
  1723. */
  1724. static int qedf_fcoe_reset(struct Scsi_Host *shost)
  1725. {
  1726. struct fc_lport *lport = shost_priv(shost);
  1727. qedf_ctx_soft_reset(lport);
  1728. return 0;
  1729. }
  1730. static void qedf_get_host_port_id(struct Scsi_Host *shost)
  1731. {
  1732. struct fc_lport *lport = shost_priv(shost);
  1733. fc_host_port_id(shost) = lport->port_id;
  1734. }
  1735. static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
  1736. *shost)
  1737. {
  1738. struct fc_host_statistics *qedf_stats;
  1739. struct fc_lport *lport = shost_priv(shost);
  1740. struct qedf_ctx *qedf = lport_priv(lport);
  1741. struct qed_fcoe_stats *fw_fcoe_stats;
  1742. qedf_stats = fc_get_host_stats(shost);
  1743. /* We don't collect offload stats for specific NPIV ports */
  1744. if (lport->vport)
  1745. goto out;
  1746. fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
  1747. if (!fw_fcoe_stats) {
  1748. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
  1749. "fw_fcoe_stats.\n");
  1750. goto out;
  1751. }
  1752. mutex_lock(&qedf->stats_mutex);
  1753. /* Query firmware for offload stats */
  1754. qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
  1755. /*
  1756. * The expectation is that we add our offload stats to the stats
1757. * being maintained by libfc each time the fc_get_host_stats callback
1758. * is invoked. The additions are not carried over between calls to
  1759. * the fc_get_host_stats callback.
  1760. */
  1761. qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
  1762. fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
  1763. fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
  1764. qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
  1765. fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
  1766. fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
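/*
 * div_u64() is used so the 64-bit byte counters are not modified in
 * place; the raw byte counts are still needed for the rx_words and
 * tx_words calculations below.
 */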
1767. qedf_stats->fcp_input_megabytes +=
1768. div_u64(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
1769. qedf_stats->fcp_output_megabytes +=
1770. div_u64(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
  1771. qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
  1772. qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
  1773. qedf_stats->invalid_crc_count +=
  1774. fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
  1775. qedf_stats->dumped_frames =
  1776. fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
  1777. qedf_stats->error_frames +=
  1778. fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
  1779. qedf_stats->fcp_input_requests += qedf->input_requests;
  1780. qedf_stats->fcp_output_requests += qedf->output_requests;
  1781. qedf_stats->fcp_control_requests += qedf->control_requests;
  1782. qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
  1783. qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
  1784. mutex_unlock(&qedf->stats_mutex);
  1785. kfree(fw_fcoe_stats);
  1786. out:
  1787. return qedf_stats;
  1788. }
  1789. static struct fc_function_template qedf_fc_transport_fn = {
  1790. .show_host_node_name = 1,
  1791. .show_host_port_name = 1,
  1792. .show_host_supported_classes = 1,
  1793. .show_host_supported_fc4s = 1,
  1794. .show_host_active_fc4s = 1,
  1795. .show_host_maxframe_size = 1,
  1796. .get_host_port_id = qedf_get_host_port_id,
  1797. .show_host_port_id = 1,
  1798. .show_host_supported_speeds = 1,
  1799. .get_host_speed = fc_get_host_speed,
  1800. .show_host_speed = 1,
  1801. .show_host_port_type = 1,
  1802. .get_host_port_state = fc_get_host_port_state,
  1803. .show_host_port_state = 1,
  1804. .show_host_symbolic_name = 1,
  1805. /*
  1806. * Tell FC transport to allocate enough space to store the backpointer
1807. * for the associated qedf_rport struct.
  1808. */
  1809. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  1810. sizeof(struct qedf_rport)),
  1811. .show_rport_maxframe_size = 1,
  1812. .show_rport_supported_classes = 1,
  1813. .show_host_fabric_name = 1,
  1814. .show_starget_node_name = 1,
  1815. .show_starget_port_name = 1,
  1816. .show_starget_port_id = 1,
  1817. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  1818. .show_rport_dev_loss_tmo = 1,
  1819. .get_fc_host_stats = qedf_fc_get_host_stats,
  1820. .issue_fc_host_lip = qedf_fcoe_reset,
  1821. .vport_create = qedf_vport_create,
  1822. .vport_delete = qedf_vport_destroy,
  1823. .vport_disable = qedf_vport_disable,
  1824. .bsg_request = fc_lport_bsg_request,
  1825. };
  1826. static struct fc_function_template qedf_fc_vport_transport_fn = {
  1827. .show_host_node_name = 1,
  1828. .show_host_port_name = 1,
  1829. .show_host_supported_classes = 1,
  1830. .show_host_supported_fc4s = 1,
  1831. .show_host_active_fc4s = 1,
  1832. .show_host_maxframe_size = 1,
  1833. .show_host_port_id = 1,
  1834. .show_host_supported_speeds = 1,
  1835. .get_host_speed = fc_get_host_speed,
  1836. .show_host_speed = 1,
  1837. .show_host_port_type = 1,
  1838. .get_host_port_state = fc_get_host_port_state,
  1839. .show_host_port_state = 1,
  1840. .show_host_symbolic_name = 1,
  1841. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  1842. sizeof(struct qedf_rport)),
  1843. .show_rport_maxframe_size = 1,
  1844. .show_rport_supported_classes = 1,
  1845. .show_host_fabric_name = 1,
  1846. .show_starget_node_name = 1,
  1847. .show_starget_port_name = 1,
  1848. .show_starget_port_id = 1,
  1849. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  1850. .show_rport_dev_loss_tmo = 1,
  1851. .get_fc_host_stats = fc_get_host_stats,
  1852. .issue_fc_host_lip = qedf_fcoe_reset,
  1853. .bsg_request = fc_lport_bsg_request,
  1854. };
  1855. static bool qedf_fp_has_work(struct qedf_fastpath *fp)
  1856. {
  1857. struct qedf_ctx *qedf = fp->qedf;
  1858. struct global_queue *que;
  1859. struct qed_sb_info *sb_info = fp->sb_info;
  1860. struct status_block *sb = sb_info->sb_virt;
  1861. u16 prod_idx;
  1862. /* Get the pointer to the global CQ this completion is on */
  1863. que = qedf->global_queues[fp->sb_id];
  1864. /* Be sure all responses have been written to PI */
  1865. rmb();
  1866. /* Get the current firmware producer index */
  1867. prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
  1868. return (que->cq_prod_idx != prod_idx);
  1869. }
  1870. /*
  1871. * Interrupt handler code.
  1872. */
1873. /* Process completion queue and copy CQE contents for deferred processing
  1874. *
  1875. * Return true if we should wake the I/O thread, false if not.
  1876. */
  1877. static bool qedf_process_completions(struct qedf_fastpath *fp)
  1878. {
  1879. struct qedf_ctx *qedf = fp->qedf;
  1880. struct qed_sb_info *sb_info = fp->sb_info;
  1881. struct status_block *sb = sb_info->sb_virt;
  1882. struct global_queue *que;
  1883. u16 prod_idx;
  1884. struct fcoe_cqe *cqe;
  1885. struct qedf_io_work *io_work;
  1886. unsigned int cpu;
  1887. struct qedf_ioreq *io_req = NULL;
  1888. u16 xid;
  1889. u16 new_cqes;
  1890. u32 comp_type;
  1891. /* Get the current firmware producer index */
  1892. prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
  1893. /* Get the pointer to the global CQ this completion is on */
  1894. que = qedf->global_queues[fp->sb_id];
1895. /* Calculate the number of new elements since the last processing */
  1896. new_cqes = (prod_idx >= que->cq_prod_idx) ?
  1897. (prod_idx - que->cq_prod_idx) :
  1898. 0x10000 - que->cq_prod_idx + prod_idx;
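/* The producer index is 16 bits wide, so account for wraparound at 0x10000. */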
  1899. /* Save producer index */
  1900. que->cq_prod_idx = prod_idx;
  1901. while (new_cqes) {
  1902. fp->completions++;
  1903. cqe = &que->cq[que->cq_cons_idx];
  1904. comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
  1905. FCOE_CQE_CQE_TYPE_MASK;
  1906. /*
  1907. * Process unsolicited CQEs directly in the interrupt handler
1908. * since we need the fastpath ID
  1909. */
  1910. if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
  1911. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
  1912. "Unsolicated CQE.\n");
  1913. qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
  1914. /*
1915. * Don't add a work list item. Increment the consumer
1916. * index and move on.
  1917. */
  1918. goto inc_idx;
  1919. }
  1920. xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
  1921. io_req = &qedf->cmd_mgr->cmds[xid];
  1922. /*
  1923. * Figure out which percpu thread we should queue this I/O
  1924. * on.
  1925. */
  1926. if (!io_req)
1927. /* If there is no io_req associated with this CQE
  1928. * just queue it on CPU 0
  1929. */
  1930. cpu = 0;
  1931. else {
  1932. cpu = io_req->cpu;
  1933. io_req->int_cpu = smp_processor_id();
  1934. }
  1935. io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
  1936. if (!io_work) {
  1937. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
  1938. "work for I/O completion.\n");
  1939. continue;
  1940. }
  1941. memset(io_work, 0, sizeof(struct qedf_io_work));
  1942. INIT_WORK(&io_work->work, qedf_fp_io_handler);
  1943. /* Copy contents of CQE for deferred processing */
  1944. memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
  1945. io_work->qedf = fp->qedf;
  1946. io_work->fp = NULL; /* Only used for unsolicited frames */
  1947. queue_work_on(cpu, qedf_io_wq, &io_work->work);
  1948. inc_idx:
  1949. que->cq_cons_idx++;
  1950. if (que->cq_cons_idx == fp->cq_num_entries)
  1951. que->cq_cons_idx = 0;
  1952. new_cqes--;
  1953. }
  1954. return true;
  1955. }
  1956. /* MSI-X fastpath handler code */
  1957. static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
  1958. {
  1959. struct qedf_fastpath *fp = dev_id;
  1960. if (!fp) {
  1961. QEDF_ERR(NULL, "fp is null.\n");
  1962. return IRQ_HANDLED;
  1963. }
  1964. if (!fp->sb_info) {
1965. QEDF_ERR(NULL, "fp->sb_info is null.");
  1966. return IRQ_HANDLED;
  1967. }
  1968. /*
  1969. * Disable interrupts for this status block while we process new
  1970. * completions
  1971. */
  1972. qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
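/*
 * Keep processing until no work is pending: re-check after updating
 * the status block index so a completion that raced with the update
 * is not missed before interrupts are re-enabled.
 */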
  1973. while (1) {
  1974. qedf_process_completions(fp);
  1975. if (qedf_fp_has_work(fp) == 0) {
  1976. /* Update the sb information */
  1977. qed_sb_update_sb_idx(fp->sb_info);
  1978. /* Check for more work */
  1979. rmb();
  1980. if (qedf_fp_has_work(fp) == 0) {
  1981. /* Re-enable interrupts */
  1982. qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
  1983. return IRQ_HANDLED;
  1984. }
  1985. }
  1986. }
  1987. /* Do we ever want to break out of above loop? */
  1988. return IRQ_HANDLED;
  1989. }
  1990. /* simd handler for MSI/INTa */
  1991. static void qedf_simd_int_handler(void *cookie)
  1992. {
  1993. /* Cookie is qedf_ctx struct */
  1994. struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
  1995. QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
  1996. }
  1997. #define QEDF_SIMD_HANDLER_NUM 0
  1998. static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
  1999. {
  2000. int i;
  2001. u16 vector_idx = 0;
  2002. u32 vector;
  2003. if (qedf->int_info.msix_cnt) {
  2004. for (i = 0; i < qedf->int_info.used_cnt; i++) {
  2005. vector_idx = i * qedf->dev_info.common.num_hwfns +
  2006. qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
  2007. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  2008. "Freeing IRQ #%d vector_idx=%d.\n",
  2009. i, vector_idx);
  2010. vector = qedf->int_info.msix[vector_idx].vector;
  2011. synchronize_irq(vector);
  2012. irq_set_affinity_hint(vector, NULL);
  2013. irq_set_affinity_notifier(vector, NULL);
  2014. free_irq(vector, &qedf->fp_array[i]);
  2015. }
  2016. } else
  2017. qed_ops->common->simd_handler_clean(qedf->cdev,
  2018. QEDF_SIMD_HANDLER_NUM);
  2019. qedf->int_info.used_cnt = 0;
  2020. qed_ops->common->set_fp_int(qedf->cdev, 0);
  2021. }
  2022. static int qedf_request_msix_irq(struct qedf_ctx *qedf)
  2023. {
  2024. int i, rc, cpu;
  2025. u16 vector_idx = 0;
  2026. u32 vector;
  2027. cpu = cpumask_first(cpu_online_mask);
  2028. for (i = 0; i < qedf->num_queues; i++) {
  2029. vector_idx = i * qedf->dev_info.common.num_hwfns +
  2030. qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
  2031. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  2032. "Requesting IRQ #%d vector_idx=%d.\n",
  2033. i, vector_idx);
  2034. vector = qedf->int_info.msix[vector_idx].vector;
  2035. rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
  2036. &qedf->fp_array[i]);
  2037. if (rc) {
  2038. QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
  2039. qedf_sync_free_irqs(qedf);
  2040. return rc;
  2041. }
  2042. qedf->int_info.used_cnt++;
  2043. rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
  2044. cpu = cpumask_next(cpu, cpu_online_mask);
  2045. }
  2046. return 0;
  2047. }
  2048. static int qedf_setup_int(struct qedf_ctx *qedf)
  2049. {
  2050. int rc = 0;
  2051. /*
  2052. * Learn interrupt configuration
  2053. */
  2054. rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
  2055. if (rc <= 0)
  2056. return 0;
  2057. rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
  2058. if (rc)
  2059. return 0;
  2060. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
  2061. "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
  2062. num_online_cpus());
  2063. if (qedf->int_info.msix_cnt)
  2064. return qedf_request_msix_irq(qedf);
  2065. qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
  2066. QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
  2067. qedf->int_info.used_cnt = 1;
  2068. QEDF_ERR(&qedf->dbg_ctx,
  2069. "Cannot load driver due to a lack of MSI-X vectors.\n");
  2070. return -EINVAL;
  2071. }
  2072. /* Main function for libfc frame reception */
  2073. static void qedf_recv_frame(struct qedf_ctx *qedf,
  2074. struct sk_buff *skb)
  2075. {
  2076. u32 fr_len;
  2077. struct fc_lport *lport;
  2078. struct fc_frame_header *fh;
  2079. struct fcoe_crc_eof crc_eof;
  2080. struct fc_frame *fp;
  2081. u8 *mac = NULL;
  2082. u8 *dest_mac = NULL;
  2083. struct fcoe_hdr *hp;
  2084. struct qedf_rport *fcport;
  2085. struct fc_lport *vn_port;
  2086. u32 f_ctl;
  2087. lport = qedf->lport;
  2088. if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
  2089. QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
  2090. kfree_skb(skb);
  2091. return;
  2092. }
  2093. if (skb_is_nonlinear(skb))
  2094. skb_linearize(skb);
  2095. mac = eth_hdr(skb)->h_source;
  2096. dest_mac = eth_hdr(skb)->h_dest;
  2097. /* Pull the header */
  2098. hp = (struct fcoe_hdr *)skb->data;
  2099. fh = (struct fc_frame_header *) skb_transport_header(skb);
  2100. skb_pull(skb, sizeof(struct fcoe_hdr));
  2101. fr_len = skb->len - sizeof(struct fcoe_crc_eof);
  2102. fp = (struct fc_frame *)skb;
  2103. fc_frame_init(fp);
  2104. fr_dev(fp) = lport;
  2105. fr_sof(fp) = hp->fcoe_sof;
  2106. if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
  2107. QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
  2108. kfree_skb(skb);
  2109. return;
  2110. }
  2111. fr_eof(fp) = crc_eof.fcoe_eof;
  2112. fr_crc(fp) = crc_eof.fcoe_crc32;
  2113. if (pskb_trim(skb, fr_len)) {
  2114. QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
  2115. kfree_skb(skb);
  2116. return;
  2117. }
  2118. fh = fc_frame_header_get(fp);
  2119. /*
  2120. * Invalid frame filters.
  2121. */
  2122. if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
  2123. fh->fh_type == FC_TYPE_FCP) {
2124. /* Drop FCP data. We don't handle this in the L2 path */
  2125. kfree_skb(skb);
  2126. return;
  2127. }
  2128. if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
  2129. fh->fh_type == FC_TYPE_ELS) {
  2130. switch (fc_frame_payload_op(fp)) {
  2131. case ELS_LOGO:
  2132. if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
  2133. /* drop non-FIP LOGO */
  2134. kfree_skb(skb);
  2135. return;
  2136. }
  2137. break;
  2138. }
  2139. }
  2140. if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
  2141. /* Drop incoming ABTS */
  2142. kfree_skb(skb);
  2143. return;
  2144. }
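/*
 * With fabric-provided MAC addresses (FPMA) the low three bytes of the
 * destination MAC carry the FC destination ID, so they must match the
 * d_id in the FC header.
 */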
  2145. if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
  2146. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  2147. "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
  2148. kfree_skb(skb);
  2149. return;
  2150. }
  2151. if (qedf->ctlr.state) {
  2152. if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
  2153. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  2154. "Wrong source address: mac:%pM dest_addr:%pM.\n",
  2155. mac, qedf->ctlr.dest_addr);
  2156. kfree_skb(skb);
  2157. return;
  2158. }
  2159. }
  2160. vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
  2161. /*
  2162. * If the destination ID from the frame header does not match what we
  2163. * have on record for lport and the search for a NPIV port came up
  2164. * empty then this is not addressed to our port so simply drop it.
  2165. */
  2166. if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
  2167. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
  2168. "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
  2169. lport->port_id, ntoh24(fh->fh_d_id));
  2170. kfree_skb(skb);
  2171. return;
  2172. }
  2173. f_ctl = ntoh24(fh->fh_f_ctl);
  2174. if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
  2175. (f_ctl & FC_FC_EX_CTX)) {
  2176. /* Drop incoming ABTS response that has both SEQ/EX CTX set */
  2177. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
  2178. "Dropping ABTS response as both SEQ/EX CTX set.\n");
  2179. kfree_skb(skb);
  2180. return;
  2181. }
  2182. /*
  2183. * If a connection is uploading, drop incoming FCoE frames as there
  2184. * is a small window where we could try to return a frame while libfc
  2185. * is trying to clean things up.
  2186. */
  2187. /* Get fcport associated with d_id if it exists */
  2188. fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
  2189. if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  2190. &fcport->flags)) {
  2191. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  2192. "Connection uploading, dropping fp=%p.\n", fp);
  2193. kfree_skb(skb);
  2194. return;
  2195. }
  2196. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
  2197. "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
  2198. ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
  2199. fh->fh_type);
  2200. if (qedf_dump_frames)
  2201. print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
  2202. 1, skb->data, skb->len, false);
  2203. fc_exch_recv(lport, fp);
  2204. }
  2205. static void qedf_ll2_process_skb(struct work_struct *work)
  2206. {
  2207. struct qedf_skb_work *skb_work =
  2208. container_of(work, struct qedf_skb_work, work);
  2209. struct qedf_ctx *qedf = skb_work->qedf;
  2210. struct sk_buff *skb = skb_work->skb;
  2211. struct ethhdr *eh;
  2212. if (!qedf) {
  2213. QEDF_ERR(NULL, "qedf is NULL\n");
  2214. goto err_out;
  2215. }
  2216. eh = (struct ethhdr *)skb->data;
  2217. /* Undo VLAN encapsulation */
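/*
 * Shift the destination and source MACs forward by VLAN_HLEN so that,
 * after the skb_pull() below, the frame looks like an untagged
 * Ethernet frame.
 */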
  2218. if (eh->h_proto == htons(ETH_P_8021Q)) {
  2219. memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
  2220. eh = skb_pull(skb, VLAN_HLEN);
  2221. skb_reset_mac_header(skb);
  2222. }
  2223. /*
  2224. * Process either a FIP frame or FCoE frame based on the
2225. * protocol value. If it's neither, just drop the
2226. * frame.
  2227. */
  2228. if (eh->h_proto == htons(ETH_P_FIP)) {
  2229. qedf_fip_recv(qedf, skb);
  2230. goto out;
  2231. } else if (eh->h_proto == htons(ETH_P_FCOE)) {
  2232. __skb_pull(skb, ETH_HLEN);
  2233. qedf_recv_frame(qedf, skb);
  2234. goto out;
  2235. } else
  2236. goto err_out;
  2237. err_out:
  2238. kfree_skb(skb);
  2239. out:
  2240. kfree(skb_work);
  2241. return;
  2242. }
  2243. static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
  2244. u32 arg1, u32 arg2)
  2245. {
  2246. struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
  2247. struct qedf_skb_work *skb_work;
  2248. if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  2249. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
  2250. "Dropping frame as link state is down.\n");
  2251. kfree_skb(skb);
  2252. return 0;
  2253. }
  2254. skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
  2255. if (!skb_work) {
  2256. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
  2257. "dropping frame.\n");
  2258. kfree_skb(skb);
  2259. return 0;
  2260. }
  2261. INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
  2262. skb_work->skb = skb;
  2263. skb_work->qedf = qedf;
  2264. queue_work(qedf->ll2_recv_wq, &skb_work->work);
  2265. return 0;
  2266. }
  2267. static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
  2268. .rx_cb = qedf_ll2_rx,
  2269. .tx_cb = NULL,
  2270. };
  2271. /* Main thread to process I/O completions */
  2272. void qedf_fp_io_handler(struct work_struct *work)
  2273. {
  2274. struct qedf_io_work *io_work =
  2275. container_of(work, struct qedf_io_work, work);
  2276. u32 comp_type;
  2277. /*
2278. * The deferred part of an unsolicited CQE hands the received
2279. * frame to libfc; all other CQEs go to qedf_process_cqe().
  2280. */
  2281. comp_type = (io_work->cqe.cqe_data >>
  2282. FCOE_CQE_CQE_TYPE_SHIFT) &
  2283. FCOE_CQE_CQE_TYPE_MASK;
  2284. if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
  2285. io_work->fp)
  2286. fc_exch_recv(io_work->qedf->lport, io_work->fp);
  2287. else
  2288. qedf_process_cqe(io_work->qedf, &io_work->cqe);
  2289. kfree(io_work);
  2290. }
  2291. static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
  2292. struct qed_sb_info *sb_info, u16 sb_id)
  2293. {
  2294. struct status_block *sb_virt;
  2295. dma_addr_t sb_phys;
  2296. int ret;
  2297. sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
  2298. sizeof(struct status_block), &sb_phys, GFP_KERNEL);
  2299. if (!sb_virt) {
  2300. QEDF_ERR(&qedf->dbg_ctx,
  2301. "Status block allocation failed for id = %d.\n",
  2302. sb_id);
  2303. return -ENOMEM;
  2304. }
  2305. ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
  2306. sb_id, QED_SB_TYPE_STORAGE);
  2307. if (ret) {
  2308. dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys);
  2309. QEDF_ERR(&qedf->dbg_ctx,
  2310. "Status block initialization failed (0x%x) for id = %d.\n",
  2311. ret, sb_id);
  2312. return ret;
  2313. }
  2314. return 0;
  2315. }
  2316. static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
  2317. {
  2318. if (sb_info->sb_virt)
  2319. dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
  2320. (void *)sb_info->sb_virt, sb_info->sb_phys);
  2321. }
  2322. static void qedf_destroy_sb(struct qedf_ctx *qedf)
  2323. {
  2324. int id;
  2325. struct qedf_fastpath *fp = NULL;
  2326. for (id = 0; id < qedf->num_queues; id++) {
  2327. fp = &(qedf->fp_array[id]);
  2328. if (fp->sb_id == QEDF_SB_ID_NULL)
  2329. break;
  2330. qedf_free_sb(qedf, fp->sb_info);
  2331. kfree(fp->sb_info);
  2332. }
  2333. kfree(qedf->fp_array);
  2334. }
  2335. static int qedf_prepare_sb(struct qedf_ctx *qedf)
  2336. {
  2337. int id;
  2338. struct qedf_fastpath *fp;
  2339. int ret;
  2340. qedf->fp_array =
  2341. kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
  2342. GFP_KERNEL);
  2343. if (!qedf->fp_array) {
  2344. QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
  2345. "failed.\n");
  2346. return -ENOMEM;
  2347. }
  2348. for (id = 0; id < qedf->num_queues; id++) {
  2349. fp = &(qedf->fp_array[id]);
  2350. fp->sb_id = QEDF_SB_ID_NULL;
  2351. fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
  2352. if (!fp->sb_info) {
  2353. QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
  2354. "allocation failed.\n");
  2355. goto err;
  2356. }
  2357. ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
  2358. if (ret) {
  2359. QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
  2360. "initialization failed.\n");
  2361. goto err;
  2362. }
  2363. fp->sb_id = id;
  2364. fp->qedf = qedf;
  2365. fp->cq_num_entries =
  2366. qedf->global_queues[id]->cq_mem_size /
  2367. sizeof(struct fcoe_cqe);
  2368. }
2369. return 0;
2370. err:
return -ENOMEM;
  2371. }
  2372. void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
  2373. {
  2374. u16 xid;
  2375. struct qedf_ioreq *io_req;
  2376. struct qedf_rport *fcport;
  2377. u32 comp_type;
  2378. u8 io_comp_type;
  2379. unsigned long flags;
  2380. comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
  2381. FCOE_CQE_CQE_TYPE_MASK;
  2382. xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
  2383. io_req = &qedf->cmd_mgr->cmds[xid];
  2384. /* Completion not for a valid I/O anymore so just return */
  2385. if (!io_req) {
  2386. QEDF_ERR(&qedf->dbg_ctx,
  2387. "io_req is NULL for xid=0x%x.\n", xid);
  2388. return;
  2389. }
  2390. fcport = io_req->fcport;
  2391. if (fcport == NULL) {
  2392. QEDF_ERR(&qedf->dbg_ctx,
  2393. "fcport is NULL for xid=0x%x io_req=%p.\n",
  2394. xid, io_req);
  2395. return;
  2396. }
  2397. /*
  2398. * Check that fcport is offloaded. If it isn't then the spinlock
  2399. * isn't valid and shouldn't be taken. We should just return.
  2400. */
  2401. if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  2402. QEDF_ERR(&qedf->dbg_ctx,
  2403. "Session not offloaded yet, fcport = %p.\n", fcport);
  2404. return;
  2405. }
  2406. spin_lock_irqsave(&fcport->rport_lock, flags);
  2407. io_comp_type = io_req->cmd_type;
  2408. spin_unlock_irqrestore(&fcport->rport_lock, flags);
  2409. switch (comp_type) {
  2410. case FCOE_GOOD_COMPLETION_CQE_TYPE:
  2411. atomic_inc(&fcport->free_sqes);
  2412. switch (io_comp_type) {
  2413. case QEDF_SCSI_CMD:
  2414. qedf_scsi_completion(qedf, cqe, io_req);
  2415. break;
  2416. case QEDF_ELS:
  2417. qedf_process_els_compl(qedf, cqe, io_req);
  2418. break;
  2419. case QEDF_TASK_MGMT_CMD:
  2420. qedf_process_tmf_compl(qedf, cqe, io_req);
  2421. break;
  2422. case QEDF_SEQ_CLEANUP:
  2423. qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
  2424. break;
  2425. }
  2426. break;
  2427. case FCOE_ERROR_DETECTION_CQE_TYPE:
  2428. atomic_inc(&fcport->free_sqes);
  2429. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2430. "Error detect CQE.\n");
  2431. qedf_process_error_detect(qedf, cqe, io_req);
  2432. break;
  2433. case FCOE_EXCH_CLEANUP_CQE_TYPE:
  2434. atomic_inc(&fcport->free_sqes);
  2435. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2436. "Cleanup CQE.\n");
  2437. qedf_process_cleanup_compl(qedf, cqe, io_req);
  2438. break;
  2439. case FCOE_ABTS_CQE_TYPE:
  2440. atomic_inc(&fcport->free_sqes);
  2441. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2442. "Abort CQE.\n");
  2443. qedf_process_abts_compl(qedf, cqe, io_req);
  2444. break;
  2445. case FCOE_DUMMY_CQE_TYPE:
  2446. atomic_inc(&fcport->free_sqes);
  2447. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2448. "Dummy CQE.\n");
  2449. break;
  2450. case FCOE_LOCAL_COMP_CQE_TYPE:
  2451. atomic_inc(&fcport->free_sqes);
  2452. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2453. "Local completion CQE.\n");
  2454. break;
  2455. case FCOE_WARNING_CQE_TYPE:
  2456. atomic_inc(&fcport->free_sqes);
  2457. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2458. "Warning CQE.\n");
  2459. qedf_process_warning_compl(qedf, cqe, io_req);
  2460. break;
  2461. case MAX_FCOE_CQE_TYPE:
  2462. atomic_inc(&fcport->free_sqes);
  2463. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2464. "Max FCoE CQE.\n");
  2465. break;
  2466. default:
  2467. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2468. "Default CQE.\n");
  2469. break;
  2470. }
  2471. }
  2472. static void qedf_free_bdq(struct qedf_ctx *qedf)
  2473. {
  2474. int i;
  2475. if (qedf->bdq_pbl_list)
  2476. dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
  2477. qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
  2478. if (qedf->bdq_pbl)
  2479. dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
  2480. qedf->bdq_pbl, qedf->bdq_pbl_dma);
  2481. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2482. if (qedf->bdq[i].buf_addr) {
  2483. dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
  2484. qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
  2485. }
  2486. }
  2487. }
  2488. static void qedf_free_global_queues(struct qedf_ctx *qedf)
  2489. {
  2490. int i;
  2491. struct global_queue **gl = qedf->global_queues;
  2492. for (i = 0; i < qedf->num_queues; i++) {
  2493. if (!gl[i])
  2494. continue;
  2495. if (gl[i]->cq)
  2496. dma_free_coherent(&qedf->pdev->dev,
  2497. gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
  2498. if (gl[i]->cq_pbl)
  2499. dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
  2500. gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
  2501. kfree(gl[i]);
  2502. }
  2503. qedf_free_bdq(qedf);
  2504. }
  2505. static int qedf_alloc_bdq(struct qedf_ctx *qedf)
  2506. {
  2507. int i;
  2508. struct scsi_bd *pbl;
  2509. u64 *list;
  2510. /* Alloc dma memory for BDQ buffers */
  2511. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2512. qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
  2513. QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
  2514. if (!qedf->bdq[i].buf_addr) {
  2515. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
  2516. "buffer %d.\n", i);
  2517. return -ENOMEM;
  2518. }
  2519. }
  2520. /* Alloc dma memory for BDQ page buffer list */
  2521. qedf->bdq_pbl_mem_size =
  2522. QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
  2523. qedf->bdq_pbl_mem_size =
  2524. ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
  2525. qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
  2526. qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
  2527. if (!qedf->bdq_pbl) {
  2528. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
  2529. return -ENOMEM;
  2530. }
  2531. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2532. "BDQ PBL addr=0x%p dma=%pad\n",
  2533. qedf->bdq_pbl, &qedf->bdq_pbl_dma);
  2534. /*
2535. * Populate the BDQ PBL with the physical address and opaque index of
2536. * each individual BDQ buffer
  2537. */
  2538. pbl = (struct scsi_bd *)qedf->bdq_pbl;
  2539. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2540. pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
  2541. pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
  2542. pbl->opaque.fcoe_opaque.hi = 0;
  2543. /* Opaque lo data is an index into the BDQ array */
  2544. pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
  2545. pbl++;
  2546. }
  2547. /* Allocate list of PBL pages */
  2548. qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
  2549. QEDF_PAGE_SIZE,
  2550. &qedf->bdq_pbl_list_dma,
  2551. GFP_KERNEL);
  2552. if (!qedf->bdq_pbl_list) {
  2553. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
  2554. return -ENOMEM;
  2555. }
  2556. /*
  2557. * Now populate PBL list with pages that contain pointers to the
  2558. * individual buffers.
  2559. */
  2560. qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
  2561. QEDF_PAGE_SIZE;
  2562. list = (u64 *)qedf->bdq_pbl_list;
  2563. for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
  2564. *list = qedf->bdq_pbl_dma;
  2565. list++;
  2566. }
  2567. return 0;
  2568. }
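/*
 * Layout sketch for qedf_alloc_global_queues() (illustrative, derived from
 * the code below): one struct global_queue is allocated per MSI-X vector,
 * each holding a CQ ring plus a PBL that lists the physical pages of that
 * ring as lo/hi 32-bit pairs. The qedf->p_cpuq array handed to qed is then
 * filled with four 32-bit words per queue: lo/hi of the CQ PBL DMA address
 * followed by lo/hi of the RQ PBL address, which is left at 0 here (qedf
 * relies on the BDQ for receive buffers rather than per-queue RQs).
 */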
  2569. static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
  2570. {
  2571. u32 *list;
  2572. int i;
  2573. int status;
  2574. u32 *pbl;
  2575. dma_addr_t page;
  2576. int num_pages;
  2577. /* Allocate and map CQs, RQs */
  2578. /*
  2579. * Number of global queues (CQ / RQ). This should
  2580. * be <= number of available MSIX vectors for the PF
  2581. */
  2582. if (!qedf->num_queues) {
  2583. QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
  2584. return -ENOMEM;
  2585. }
  2586. /*
  2587. * Make sure we allocated the PBL that will contain the physical
  2588. * addresses of our queues
  2589. */
  2590. if (!qedf->p_cpuq) {
  2591. QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
  2592. return -EINVAL;
  2593. }
  2594. qedf->global_queues = kzalloc((sizeof(struct global_queue *)
  2595. * qedf->num_queues), GFP_KERNEL);
  2596. if (!qedf->global_queues) {
  2597. QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
  2598. "queues array ptr memory\n");
  2599. return -ENOMEM;
  2600. }
  2601. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2602. "qedf->global_queues=%p.\n", qedf->global_queues);
  2603. /* Allocate DMA coherent buffers for BDQ */
  2604. status = qedf_alloc_bdq(qedf);
  2605. if (status) {
  2606. QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
  2607. goto mem_alloc_failure;
  2608. }
  2609. /* Allocate a CQ and an associated PBL for each MSI-X vector */
  2610. for (i = 0; i < qedf->num_queues; i++) {
  2611. qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
  2612. GFP_KERNEL);
  2613. if (!qedf->global_queues[i]) {
  2614. QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
  2615. "global queue %d.\n", i);
  2616. status = -ENOMEM;
  2617. goto mem_alloc_failure;
  2618. }
  2619. qedf->global_queues[i]->cq_mem_size =
  2620. FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
  2621. qedf->global_queues[i]->cq_mem_size =
  2622. ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
  2623. qedf->global_queues[i]->cq_pbl_size =
  2624. (qedf->global_queues[i]->cq_mem_size /
  2625. PAGE_SIZE) * sizeof(void *);
  2626. qedf->global_queues[i]->cq_pbl_size =
  2627. ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
  2628. qedf->global_queues[i]->cq =
  2629. dma_alloc_coherent(&qedf->pdev->dev,
  2630. qedf->global_queues[i]->cq_mem_size,
  2631. &qedf->global_queues[i]->cq_dma,
  2632. GFP_KERNEL);
  2633. if (!qedf->global_queues[i]->cq) {
  2634. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
  2635. status = -ENOMEM;
  2636. goto mem_alloc_failure;
  2637. }
  2638. qedf->global_queues[i]->cq_pbl =
  2639. dma_alloc_coherent(&qedf->pdev->dev,
  2640. qedf->global_queues[i]->cq_pbl_size,
  2641. &qedf->global_queues[i]->cq_pbl_dma,
  2642. GFP_KERNEL);
  2643. if (!qedf->global_queues[i]->cq_pbl) {
  2644. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
  2645. status = -ENOMEM;
  2646. goto mem_alloc_failure;
  2647. }
  2648. /* Create PBL */
  2649. num_pages = qedf->global_queues[i]->cq_mem_size /
  2650. QEDF_PAGE_SIZE;
  2651. page = qedf->global_queues[i]->cq_dma;
  2652. pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
  2653. while (num_pages--) {
  2654. *pbl = U64_LO(page);
  2655. pbl++;
  2656. *pbl = U64_HI(page);
  2657. pbl++;
  2658. page += QEDF_PAGE_SIZE;
  2659. }
  2660. /* Set the initial consumer index for cq */
  2661. qedf->global_queues[i]->cq_cons_idx = 0;
  2662. }
  2663. list = (u32 *)qedf->p_cpuq;
  2664. /*
  2665. * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
  2666. * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
  2667. * to the physical address which contains an array of pointers to
  2668. * the physical addresses of the specific queue pages.
  2669. */
  2670. for (i = 0; i < qedf->num_queues; i++) {
  2671. *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
  2672. list++;
  2673. *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
  2674. list++;
  2675. *list = U64_LO(0);
  2676. list++;
  2677. *list = U64_HI(0);
  2678. list++;
  2679. }
  2680. return 0;
  2681. mem_alloc_failure:
  2682. qedf_free_global_queues(qedf);
  2683. return status;
  2684. }
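/*
 * Rough summary (based on the function below): qedf_set_fcoe_pf_param()
 * sizes the per-PF FCoE resources that are later passed to qed through
 * update_pf_params(): number of connections and tasks, SQ PBL pages, CQ
 * entries, the global queue parameter array (hw_p_cpuq) and the BDQ PBL
 * base address and entry count. log_page_size is ilog2(QEDF_PAGE_SIZE),
 * i.e. 12 for 4 KB pages.
 */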
  2685. static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
  2686. {
  2687. u8 sq_num_pbl_pages;
  2688. u32 sq_mem_size;
  2689. u32 cq_mem_size;
  2690. u32 cq_num_entries;
  2691. int rval;
/*
 * The number of completion queues/fastpath interrupts/status blocks
 * we allocate is the minimum of:
 *
 * Number of CPUs
 * Number allocated by qed for our PCI function
 */
  2699. qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
  2700. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
  2701. qedf->num_queues);
  2702. qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
  2703. qedf->num_queues * sizeof(struct qedf_glbl_q_params),
  2704. &qedf->hw_p_cpuq, GFP_KERNEL);
if (!qedf->p_cpuq) {
QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
return -ENOMEM;
}
rval = qedf_alloc_global_queues(qedf);
if (rval) {
QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation failed.\n");
return rval;
}
  2715. /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
  2716. sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
  2717. sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
  2718. sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
  2719. /* Calculate CQ num entries */
  2720. cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
  2721. cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
  2722. cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
  2723. memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
  2724. /* Setup the value for fcoe PF */
  2725. qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
  2726. qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
  2727. qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
  2728. (u64)qedf->hw_p_cpuq;
  2729. qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
  2730. qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
  2731. qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
  2732. qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
  2733. /* log_page_size: 12 for 4KB pages */
  2734. qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
  2735. qedf->pf_params.fcoe_pf_params.mtu = 9000;
  2736. qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
  2737. qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
  2738. /* BDQ address and size */
  2739. qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
  2740. qedf->bdq_pbl_list_dma;
  2741. qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
  2742. qedf->bdq_pbl_list_num_entries;
  2743. qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
  2744. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2745. "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
  2746. qedf->bdq_pbl_list,
  2747. qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
  2748. qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
  2749. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2750. "cq_num_entries=%d.\n",
  2751. qedf->pf_params.fcoe_pf_params.cq_num_entries);
  2752. return 0;
  2753. }
  2754. /* Free DMA coherent memory for array of queue pointers we pass to qed */
  2755. static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
  2756. {
  2757. size_t size = 0;
  2758. if (qedf->p_cpuq) {
  2759. size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
  2760. dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
  2761. qedf->hw_p_cpuq);
  2762. }
  2763. qedf_free_global_queues(qedf);
  2764. kfree(qedf->global_queues);
  2765. }
  2766. /*
  2767. * PCI driver functions
  2768. */
  2769. static const struct pci_device_id qedf_pci_tbl[] = {
  2770. { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
  2771. { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
  2772. {0}
  2773. };
  2774. MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
  2775. static struct pci_driver qedf_pci_driver = {
  2776. .name = QEDF_MODULE_NAME,
  2777. .id_table = qedf_pci_tbl,
  2778. .probe = qedf_probe,
  2779. .remove = qedf_remove,
  2780. .shutdown = qedf_shutdown,
  2781. .suspend = qedf_suspend,
  2782. };
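/*
 * High-level flow of __qedf_probe() (informational outline, based on the
 * code that follows). mode selects between a fresh probe (QEDF_MODE_NORMAL)
 * and re-initialization after recovery (QEDF_MODE_RECOVERY), in which case
 * the lport/Scsi_Host and related OS objects from the previous probe are
 * reused rather than reallocated:
 *
 *   1. Allocate lport/qedf_ctx (normal mode only) and the io_work mempool.
 *   2. qed common probe, fill_dev_info, set_fcoe_pf_param, update_pf_params.
 *   3. Prepare status blocks, start the slowpath, set up MSI-X interrupts.
 *   4. Start the FCoE function, ring the BDQ doorbells, set WWNN/WWPN.
 *   5. Add the Scsi_Host, start LL2, set up fcoe_ctlr/lport and workqueues.
 *   6. Update driver state with the MFW, bring the link up and log in.
 */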
  2783. static int __qedf_probe(struct pci_dev *pdev, int mode)
  2784. {
  2785. int rc = -EINVAL;
  2786. struct fc_lport *lport;
  2787. struct qedf_ctx *qedf = NULL;
  2788. struct Scsi_Host *host;
  2789. bool is_vf = false;
  2790. struct qed_ll2_params params;
  2791. char host_buf[20];
  2792. struct qed_link_params link_params;
  2793. int status;
  2794. void *task_start, *task_end;
  2795. struct qed_slowpath_params slowpath_params;
  2796. struct qed_probe_params qed_params;
  2797. u16 retry_cnt = 10;
  2798. /*
  2799. * When doing error recovery we didn't reap the lport so don't try
  2800. * to reallocate it.
  2801. */
  2802. retry_probe:
  2803. if (mode == QEDF_MODE_RECOVERY)
  2804. msleep(2000);
  2805. if (mode != QEDF_MODE_RECOVERY) {
  2806. lport = libfc_host_alloc(&qedf_host_template,
  2807. sizeof(struct qedf_ctx));
  2808. if (!lport) {
  2809. QEDF_ERR(NULL, "Could not allocate lport.\n");
  2810. rc = -ENOMEM;
  2811. goto err0;
  2812. }
  2813. fc_disc_init(lport);
  2814. /* Initialize qedf_ctx */
  2815. qedf = lport_priv(lport);
  2816. set_bit(QEDF_PROBING, &qedf->flags);
  2817. qedf->lport = lport;
  2818. qedf->ctlr.lp = lport;
  2819. qedf->pdev = pdev;
  2820. qedf->dbg_ctx.pdev = pdev;
  2821. qedf->dbg_ctx.host_no = lport->host->host_no;
  2822. spin_lock_init(&qedf->hba_lock);
  2823. INIT_LIST_HEAD(&qedf->fcports);
  2824. qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
  2825. atomic_set(&qedf->num_offloads, 0);
  2826. qedf->stop_io_on_error = false;
  2827. pci_set_drvdata(pdev, qedf);
  2828. init_completion(&qedf->fipvlan_compl);
  2829. mutex_init(&qedf->stats_mutex);
  2830. mutex_init(&qedf->flush_mutex);
  2831. qedf->flogi_pending = 0;
  2832. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
  2833. "QLogic FastLinQ FCoE Module qedf %s, "
  2834. "FW %d.%d.%d.%d\n", QEDF_VERSION,
  2835. FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
  2836. FW_ENGINEERING_VERSION);
  2837. } else {
  2838. /* Init pointers during recovery */
  2839. qedf = pci_get_drvdata(pdev);
  2840. set_bit(QEDF_PROBING, &qedf->flags);
  2841. lport = qedf->lport;
  2842. }
  2843. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
  2844. host = lport->host;
  2845. /* Allocate mempool for qedf_io_work structs */
  2846. qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
  2847. qedf_io_work_cache);
  2848. if (qedf->io_mempool == NULL) {
  2849. QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
  2850. goto err1;
  2851. }
  2852. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
  2853. qedf->io_mempool);
  2854. qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
  2855. 1, qedf->lport->host->host_no);
  2856. INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
  2857. INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
  2858. INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
  2859. INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
  2860. qedf->fipvlan_retries = qedf_fipvlan_retries;
  2861. /* Set a default prio in case DCBX doesn't converge */
  2862. if (qedf_default_prio > -1) {
  2863. /*
  2864. * This is the case where we pass a modparam in so we want to
  2865. * honor it even if dcbx doesn't converge.
  2866. */
  2867. qedf->prio = qedf_default_prio;
  2868. } else
  2869. qedf->prio = QEDF_DEFAULT_PRIO;
  2870. /*
  2871. * Common probe. Takes care of basic hardware init and pci_*
  2872. * functions.
  2873. */
  2874. memset(&qed_params, 0, sizeof(qed_params));
  2875. qed_params.protocol = QED_PROTOCOL_FCOE;
  2876. qed_params.dp_module = qedf_dp_module;
  2877. qed_params.dp_level = qedf_dp_level;
  2878. qed_params.is_vf = is_vf;
  2879. qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
  2880. if (!qedf->cdev) {
  2881. if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
  2882. QEDF_ERR(&qedf->dbg_ctx,
  2883. "Retry %d initialize hardware\n", retry_cnt);
  2884. retry_cnt--;
  2885. goto retry_probe;
  2886. }
  2887. QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
  2888. rc = -ENODEV;
  2889. goto err1;
  2890. }
  2891. /* Learn information crucial for qedf to progress */
  2892. rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
  2893. if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
  2895. goto err1;
  2896. }
  2897. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
  2898. "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
  2899. qedf->dev_info.common.num_hwfns,
  2900. qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
  2901. /* queue allocation code should come here
  2902. * order should be
  2903. * slowpath_start
  2904. * status block allocation
  2905. * interrupt registration (to get min number of queues)
  2906. * set_fcoe_pf_param
  2907. * qed_sp_fcoe_func_start
  2908. */
  2909. rc = qedf_set_fcoe_pf_param(qedf);
  2910. if (rc) {
  2911. QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
  2912. goto err2;
  2913. }
  2914. qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
  2915. /* Learn information crucial for qedf to progress */
  2916. rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
  2917. if (rc) {
  2918. QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
  2919. goto err2;
  2920. }
  2921. if (mode != QEDF_MODE_RECOVERY) {
  2922. qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
  2923. if (IS_ERR(qedf->devlink)) {
  2924. QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
  2925. rc = PTR_ERR(qedf->devlink);
  2926. qedf->devlink = NULL;
  2927. goto err2;
  2928. }
  2929. }
  2930. /* Record BDQ producer doorbell addresses */
  2931. qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
  2932. qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
  2933. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2934. "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
  2935. qedf->bdq_secondary_prod);
  2936. qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
  2937. rc = qedf_prepare_sb(qedf);
  2938. if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
  2940. goto err2;
  2941. }
  2942. /* Start the Slowpath-process */
  2943. memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
  2944. slowpath_params.int_mode = QED_INT_MODE_MSIX;
  2945. slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
  2946. slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
  2947. slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
  2948. slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
  2949. strscpy(slowpath_params.name, "qedf", sizeof(slowpath_params.name));
  2950. rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
  2951. if (rc) {
  2952. QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
  2953. goto err2;
  2954. }
  2955. /*
  2956. * update_pf_params needs to be called before and after slowpath
  2957. * start
  2958. */
  2959. qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
  2960. /* Setup interrupts */
  2961. rc = qedf_setup_int(qedf);
  2962. if (rc) {
  2963. QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
  2964. goto err3;
  2965. }
  2966. rc = qed_ops->start(qedf->cdev, &qedf->tasks);
  2967. if (rc) {
  2968. QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
  2969. goto err4;
  2970. }
  2971. task_start = qedf_get_task_mem(&qedf->tasks, 0);
  2972. task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
  2973. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
  2974. "end=%p block_size=%u.\n", task_start, task_end,
  2975. qedf->tasks.size);
  2976. /*
  2977. * We need to write the number of BDs in the BDQ we've preallocated so
  2978. * the f/w will do a prefetch and we'll get an unsolicited CQE when a
  2979. * packet arrives.
  2980. */
  2981. qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
  2982. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2983. "Writing %d to primary and secondary BDQ doorbell registers.\n",
  2984. qedf->bdq_prod_idx);
  2985. writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
  2986. readw(qedf->bdq_primary_prod);
  2987. writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
  2988. readw(qedf->bdq_secondary_prod);
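/*
 * The readw() calls above are presumably there to flush the posted doorbell
 * writes so the BDQ producer index is visible to the adapter before the
 * probe continues.
 */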
  2989. qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
  2990. /* Now that the dev_info struct has been filled in set the MAC
  2991. * address
  2992. */
  2993. ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
  2994. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
  2995. qedf->mac);
  2996. /*
  2997. * Set the WWNN and WWPN in the following way:
  2998. *
  2999. * If the info we get from qed is non-zero then use that to set the
  3000. * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
  3001. * on the MAC address.
  3002. */
  3003. if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
  3004. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  3005. "Setting WWPN and WWNN from qed dev_info.\n");
  3006. qedf->wwnn = qedf->dev_info.wwnn;
  3007. qedf->wwpn = qedf->dev_info.wwpn;
  3008. } else {
  3009. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  3010. "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
  3011. qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
  3012. qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
  3013. }
  3014. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
  3015. "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
  3016. sprintf(host_buf, "host_%d", host->host_no);
  3017. qed_ops->common->set_name(qedf->cdev, host_buf);
  3018. /* Allocate cmd mgr */
  3019. qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
  3020. if (!qedf->cmd_mgr) {
  3021. QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
  3022. rc = -ENOMEM;
  3023. goto err5;
  3024. }
  3025. if (mode != QEDF_MODE_RECOVERY) {
  3026. host->transportt = qedf_fc_transport_template;
  3027. host->max_lun = qedf_max_lun;
  3028. host->max_cmd_len = QEDF_MAX_CDB_LEN;
  3029. host->max_id = QEDF_MAX_SESSIONS;
  3030. host->can_queue = FCOE_PARAMS_NUM_TASKS;
  3031. rc = scsi_add_host(host, &pdev->dev);
  3032. if (rc) {
  3033. QEDF_WARN(&qedf->dbg_ctx,
  3034. "Error adding Scsi_Host rc=0x%x.\n", rc);
  3035. goto err6;
  3036. }
  3037. }
  3038. memset(&params, 0, sizeof(params));
  3039. params.mtu = QEDF_LL2_BUF_SIZE;
  3040. ether_addr_copy(params.ll2_mac_address, qedf->mac);
  3041. /* Start LL2 processing thread */
  3042. qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
  3043. host->host_no);
  3044. if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate LL2 workqueue.\n");
  3046. rc = -ENOMEM;
  3047. goto err7;
  3048. }
  3049. #ifdef CONFIG_DEBUG_FS
  3050. qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
  3051. qedf_dbg_fops);
  3052. #endif
  3053. /* Start LL2 */
  3054. qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
  3055. rc = qed_ops->ll2->start(qedf->cdev, &params);
  3056. if (rc) {
  3057. QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
  3058. goto err7;
  3059. }
  3060. set_bit(QEDF_LL2_STARTED, &qedf->flags);
  3061. /* Set initial FIP/FCoE VLAN to NULL */
  3062. qedf->vlan_id = 0;
  3063. /*
  3064. * No need to setup fcoe_ctlr or fc_lport objects during recovery since
  3065. * they were not reaped during the unload process.
  3066. */
  3067. if (mode != QEDF_MODE_RECOVERY) {
  3068. /* Setup imbedded fcoe controller */
  3069. qedf_fcoe_ctlr_setup(qedf);
  3070. /* Setup lport */
  3071. rc = qedf_lport_setup(qedf);
  3072. if (rc) {
  3073. QEDF_ERR(&(qedf->dbg_ctx),
  3074. "qedf_lport_setup failed.\n");
  3075. goto err7;
  3076. }
  3077. }
  3078. qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
  3079. WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
  3080. if (!qedf->timer_work_queue) {
  3081. QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
  3082. "workqueue.\n");
  3083. rc = -ENOMEM;
  3084. goto err7;
  3085. }
  3086. /* DPC workqueue is not reaped during recovery unload */
  3087. if (mode != QEDF_MODE_RECOVERY) {
  3088. sprintf(host_buf, "qedf_%u_dpc",
  3089. qedf->lport->host->host_no);
  3090. qedf->dpc_wq =
  3091. alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
  3092. }
  3093. INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
  3094. /*
  3095. * GRC dump and sysfs parameters are not reaped during the recovery
  3096. * unload process.
  3097. */
  3098. if (mode != QEDF_MODE_RECOVERY) {
  3099. qedf->grcdump_size =
  3100. qed_ops->common->dbg_all_data_size(qedf->cdev);
  3101. if (qedf->grcdump_size) {
  3102. rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
  3103. qedf->grcdump_size);
  3104. if (rc) {
  3105. QEDF_ERR(&(qedf->dbg_ctx),
  3106. "GRC Dump buffer alloc failed.\n");
  3107. qedf->grcdump = NULL;
  3108. }
  3109. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  3110. "grcdump: addr=%p, size=%u.\n",
  3111. qedf->grcdump, qedf->grcdump_size);
  3112. }
  3113. qedf_create_sysfs_ctx_attr(qedf);
  3114. /* Initialize I/O tracing for this adapter */
  3115. spin_lock_init(&qedf->io_trace_lock);
  3116. qedf->io_trace_idx = 0;
  3117. }
  3118. init_completion(&qedf->flogi_compl);
  3119. status = qed_ops->common->update_drv_state(qedf->cdev, true);
  3120. if (status)
  3121. QEDF_ERR(&(qedf->dbg_ctx),
  3122. "Failed to send drv state to MFW.\n");
  3123. memset(&link_params, 0, sizeof(struct qed_link_params));
  3124. link_params.link_up = true;
  3125. status = qed_ops->common->set_link(qedf->cdev, &link_params);
  3126. if (status)
  3127. QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
  3128. /* Start/restart discovery */
  3129. if (mode == QEDF_MODE_RECOVERY)
  3130. fcoe_ctlr_link_up(&qedf->ctlr);
  3131. else
  3132. fc_fabric_login(lport);
  3133. QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
  3134. clear_bit(QEDF_PROBING, &qedf->flags);
  3135. /* All good */
  3136. return 0;
  3137. err7:
  3138. if (qedf->ll2_recv_wq)
  3139. destroy_workqueue(qedf->ll2_recv_wq);
  3140. fc_remove_host(qedf->lport->host);
  3141. scsi_remove_host(qedf->lport->host);
  3142. #ifdef CONFIG_DEBUG_FS
  3143. qedf_dbg_host_exit(&(qedf->dbg_ctx));
  3144. #endif
  3145. err6:
  3146. qedf_cmd_mgr_free(qedf->cmd_mgr);
  3147. err5:
  3148. qed_ops->stop(qedf->cdev);
  3149. err4:
  3150. qedf_free_fcoe_pf_param(qedf);
  3151. qedf_sync_free_irqs(qedf);
  3152. err3:
  3153. qed_ops->common->slowpath_stop(qedf->cdev);
  3154. err2:
  3155. qed_ops->common->remove(qedf->cdev);
  3156. err1:
  3157. scsi_host_put(lport->host);
  3158. err0:
  3159. return rc;
  3160. }
  3161. static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  3162. {
  3163. return __qedf_probe(pdev, QEDF_MODE_NORMAL);
  3164. }
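/*
 * Teardown outline for __qedf_remove() (informational, based on the code
 * below). In QEDF_MODE_RECOVERY the OS-visible objects (Scsi_Host, lport,
 * fcoe_ctlr, sysfs/GRC buffers, DPC workqueue) are deliberately kept so a
 * follow-up __qedf_probe(..., QEDF_MODE_RECOVERY) can reuse them; only the
 * hardware-facing resources are torn down and rebuilt.
 */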
  3165. static void __qedf_remove(struct pci_dev *pdev, int mode)
  3166. {
  3167. struct qedf_ctx *qedf;
  3168. int rc;
  3169. int cnt = 0;
  3170. if (!pdev) {
  3171. QEDF_ERR(NULL, "pdev is NULL.\n");
  3172. return;
  3173. }
  3174. qedf = pci_get_drvdata(pdev);
  3175. /*
  3176. * Prevent race where we're in board disable work and then try to
  3177. * rmmod the module.
  3178. */
  3179. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  3180. QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
  3181. return;
  3182. }
  3183. stag_in_prog:
  3184. if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
  3185. QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
  3186. cnt++;
  3187. if (cnt < 5) {
  3188. msleep(500);
  3189. goto stag_in_prog;
  3190. }
  3191. }
  3192. if (mode != QEDF_MODE_RECOVERY)
  3193. set_bit(QEDF_UNLOADING, &qedf->flags);
  3194. /* Logoff the fabric to upload all connections */
  3195. if (mode == QEDF_MODE_RECOVERY)
  3196. fcoe_ctlr_link_down(&qedf->ctlr);
  3197. else
  3198. fc_fabric_logoff(qedf->lport);
  3199. if (!qedf_wait_for_upload(qedf))
  3200. QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
  3201. #ifdef CONFIG_DEBUG_FS
  3202. qedf_dbg_host_exit(&(qedf->dbg_ctx));
  3203. #endif
  3204. /* Stop any link update handling */
  3205. cancel_delayed_work_sync(&qedf->link_update);
  3206. destroy_workqueue(qedf->link_update_wq);
  3207. qedf->link_update_wq = NULL;
  3208. if (qedf->timer_work_queue)
  3209. destroy_workqueue(qedf->timer_work_queue);
  3210. /* Stop Light L2 */
  3211. clear_bit(QEDF_LL2_STARTED, &qedf->flags);
  3212. qed_ops->ll2->stop(qedf->cdev);
  3213. if (qedf->ll2_recv_wq)
  3214. destroy_workqueue(qedf->ll2_recv_wq);
  3215. /* Stop fastpath */
  3216. qedf_sync_free_irqs(qedf);
  3217. qedf_destroy_sb(qedf);
  3218. /*
  3219. * During recovery don't destroy OS constructs that represent the
  3220. * physical port.
  3221. */
  3222. if (mode != QEDF_MODE_RECOVERY) {
  3223. qedf_free_grc_dump_buf(&qedf->grcdump);
  3224. qedf_remove_sysfs_ctx_attr(qedf);
  3225. /* Remove all SCSI/libfc/libfcoe structures */
  3226. fcoe_ctlr_destroy(&qedf->ctlr);
  3227. fc_lport_destroy(qedf->lport);
  3228. fc_remove_host(qedf->lport->host);
  3229. scsi_remove_host(qedf->lport->host);
  3230. }
  3231. qedf_cmd_mgr_free(qedf->cmd_mgr);
  3232. if (mode != QEDF_MODE_RECOVERY) {
  3233. fc_exch_mgr_free(qedf->lport);
  3234. fc_lport_free_stats(qedf->lport);
  3235. /* Wait for all vports to be reaped */
  3236. qedf_wait_for_vport_destroy(qedf);
  3237. }
  3238. /*
  3239. * Now that all connections have been uploaded we can stop the
  3240. * rest of the qed operations
  3241. */
  3242. qed_ops->stop(qedf->cdev);
  3243. if (mode != QEDF_MODE_RECOVERY) {
  3244. if (qedf->dpc_wq) {
  3245. /* Stop general DPC handling */
  3246. destroy_workqueue(qedf->dpc_wq);
  3247. qedf->dpc_wq = NULL;
  3248. }
  3249. }
  3250. /* Final shutdown for the board */
  3251. qedf_free_fcoe_pf_param(qedf);
  3252. if (mode != QEDF_MODE_RECOVERY) {
  3253. qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
  3254. pci_set_drvdata(pdev, NULL);
  3255. }
  3256. rc = qed_ops->common->update_drv_state(qedf->cdev, false);
  3257. if (rc)
  3258. QEDF_ERR(&(qedf->dbg_ctx),
  3259. "Failed to send drv state to MFW.\n");
  3260. if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
  3261. qed_ops->common->devlink_unregister(qedf->devlink);
  3262. qedf->devlink = NULL;
  3263. }
  3264. qed_ops->common->slowpath_stop(qedf->cdev);
  3265. qed_ops->common->remove(qedf->cdev);
  3266. mempool_destroy(qedf->io_mempool);
  3267. /* Only reap the Scsi_host on a real removal */
  3268. if (mode != QEDF_MODE_RECOVERY)
  3269. scsi_host_put(qedf->lport->host);
  3270. }
  3271. static void qedf_remove(struct pci_dev *pdev)
  3272. {
  3273. /* Check to make sure this function wasn't already disabled */
  3274. if (!atomic_read(&pdev->enable_cnt))
  3275. return;
  3276. __qedf_remove(pdev, QEDF_MODE_NORMAL);
  3277. }
  3278. void qedf_wq_grcdump(struct work_struct *work)
  3279. {
  3280. struct qedf_ctx *qedf =
  3281. container_of(work, struct qedf_ctx, grcdump_work.work);
  3282. QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
  3283. qedf_capture_grc_dump(qedf);
  3284. }
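/*
 * Hardware error dispatch (summary of the handler below): a fan failure
 * schedules board_disable_work immediately; MFW/attention/DMAE/FW-assert
 * errors only latch attn_clr_enable so attentions are not reasserted; a
 * ramrod failure additionally reports a fatal error through devlink when
 * recovery is enabled, which allows qed to trigger the recovery flow.
 */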
  3285. void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
  3286. {
  3287. struct qedf_ctx *qedf = dev;
  3288. QEDF_ERR(&(qedf->dbg_ctx),
  3289. "Hardware error handler scheduled, event=%d.\n",
  3290. err_type);
  3291. if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
  3292. QEDF_ERR(&(qedf->dbg_ctx),
  3293. "Already in recovery, not scheduling board disable work.\n");
  3294. return;
  3295. }
  3296. switch (err_type) {
  3297. case QED_HW_ERR_FAN_FAIL:
  3298. schedule_delayed_work(&qedf->board_disable_work, 0);
  3299. break;
  3300. case QED_HW_ERR_MFW_RESP_FAIL:
  3301. case QED_HW_ERR_HW_ATTN:
  3302. case QED_HW_ERR_DMAE_FAIL:
  3303. case QED_HW_ERR_FW_ASSERT:
  3304. /* Prevent HW attentions from being reasserted */
  3305. qed_ops->common->attn_clr_enable(qedf->cdev, true);
  3306. break;
  3307. case QED_HW_ERR_RAMROD_FAIL:
  3308. /* Prevent HW attentions from being reasserted */
  3309. qed_ops->common->attn_clr_enable(qedf->cdev, true);
  3310. if (qedf_enable_recovery && qedf->devlink)
  3311. qed_ops->common->report_fatal_error(qedf->devlink,
  3312. err_type);
  3313. break;
  3314. default:
  3315. break;
  3316. }
  3317. }
  3318. /*
  3319. * Protocol TLV handler
  3320. */
  3321. void qedf_get_protocol_tlv_data(void *dev, void *data)
  3322. {
  3323. struct qedf_ctx *qedf = dev;
  3324. struct qed_mfw_tlv_fcoe *fcoe = data;
  3325. struct fc_lport *lport;
  3326. struct Scsi_Host *host;
  3327. struct fc_host_attrs *fc_host;
  3328. struct fc_host_statistics *hst;
  3329. if (!qedf) {
  3330. QEDF_ERR(NULL, "qedf is null.\n");
  3331. return;
  3332. }
  3333. if (test_bit(QEDF_PROBING, &qedf->flags)) {
  3334. QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
  3335. return;
  3336. }
  3337. lport = qedf->lport;
  3338. host = lport->host;
  3339. fc_host = shost_to_fc_host(host);
  3340. /* Force a refresh of the fc_host stats including offload stats */
  3341. hst = qedf_fc_get_host_stats(host);
  3342. fcoe->qos_pri_set = true;
  3343. fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
  3344. fcoe->ra_tov_set = true;
  3345. fcoe->ra_tov = lport->r_a_tov;
  3346. fcoe->ed_tov_set = true;
  3347. fcoe->ed_tov = lport->e_d_tov;
  3348. fcoe->npiv_state_set = true;
  3349. fcoe->npiv_state = 1; /* NPIV always enabled */
  3350. fcoe->num_npiv_ids_set = true;
  3351. fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
  3352. /* Certain attributes we only want to set if we've selected an FCF */
  3353. if (qedf->ctlr.sel_fcf) {
  3354. fcoe->switch_name_set = true;
  3355. u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
  3356. }
  3357. fcoe->port_state_set = true;
  3358. /* For qedf we're either link down or fabric attach */
  3359. if (lport->link_up)
  3360. fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
  3361. else
  3362. fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
  3363. fcoe->link_failures_set = true;
  3364. fcoe->link_failures = (u16)hst->link_failure_count;
  3365. fcoe->fcoe_txq_depth_set = true;
  3366. fcoe->fcoe_rxq_depth_set = true;
  3367. fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
  3368. fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
  3369. fcoe->fcoe_rx_frames_set = true;
  3370. fcoe->fcoe_rx_frames = hst->rx_frames;
  3371. fcoe->fcoe_tx_frames_set = true;
  3372. fcoe->fcoe_tx_frames = hst->tx_frames;
  3373. fcoe->fcoe_rx_bytes_set = true;
  3374. fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
  3375. fcoe->fcoe_tx_bytes_set = true;
  3376. fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
  3377. fcoe->crc_count_set = true;
  3378. fcoe->crc_count = hst->invalid_crc_count;
  3379. fcoe->tx_abts_set = true;
  3380. fcoe->tx_abts = hst->fcp_packet_aborts;
  3381. fcoe->tx_lun_rst_set = true;
  3382. fcoe->tx_lun_rst = qedf->lun_resets;
  3383. fcoe->abort_task_sets_set = true;
  3384. fcoe->abort_task_sets = qedf->packet_aborts;
  3385. fcoe->scsi_busy_set = true;
  3386. fcoe->scsi_busy = qedf->busy;
  3387. fcoe->scsi_tsk_full_set = true;
  3388. fcoe->scsi_tsk_full = qedf->task_set_fulls;
  3389. }
  3390. /* Deferred work function to perform soft context reset on STAG change */
  3391. void qedf_stag_change_work(struct work_struct *work)
  3392. {
  3393. struct qedf_ctx *qedf =
  3394. container_of(work, struct qedf_ctx, stag_work.work);
  3395. if (!qedf) {
QEDF_ERR(NULL, "qedf is NULL.\n");
  3397. return;
  3398. }
  3399. if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
  3400. QEDF_ERR(&qedf->dbg_ctx,
"Already in recovery, not performing software context reset.\n");
  3402. return;
  3403. }
  3404. if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
  3405. QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
  3406. return;
  3407. }
  3408. set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.\n",
  3410. dev_name(&qedf->pdev->dev), __func__, __LINE__,
  3411. qedf->dbg_ctx.host_no);
  3412. qedf_ctx_soft_reset(qedf->lport);
  3413. }
  3414. static void qedf_shutdown(struct pci_dev *pdev)
  3415. {
  3416. __qedf_remove(pdev, QEDF_MODE_NORMAL);
  3417. }
  3418. static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
  3419. {
  3420. struct qedf_ctx *qedf;
  3421. if (!pdev) {
  3422. QEDF_ERR(NULL, "pdev is NULL.\n");
  3423. return -ENODEV;
  3424. }
  3425. qedf = pci_get_drvdata(pdev);
  3426. QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
  3427. return -EPERM;
  3428. }
  3429. /*
  3430. * Recovery handler code
  3431. */
  3432. static void qedf_schedule_recovery_handler(void *dev)
  3433. {
  3434. struct qedf_ctx *qedf = dev;
  3435. QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
  3436. schedule_delayed_work(&qedf->recovery_work, 0);
  3437. }
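/*
 * Recovery sequence (summary of the handler below): QEDF_IN_RECOVERY is set
 * exactly once, the MFW is asked to quiesce PCI traffic via recovery_prolog,
 * then the function is torn down and re-probed in QEDF_MODE_RECOVERY.
 * Link and DCBX state are forced down because no link-down event arrives
 * from the MFW in this path.
 */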
  3438. static void qedf_recovery_handler(struct work_struct *work)
  3439. {
  3440. struct qedf_ctx *qedf =
  3441. container_of(work, struct qedf_ctx, recovery_work.work);
  3442. if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
  3443. return;
  3444. /*
  3445. * Call common_ops->recovery_prolog to allow the MFW to quiesce
  3446. * any PCI transactions.
  3447. */
  3448. qed_ops->common->recovery_prolog(qedf->cdev);
  3449. QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
  3450. __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
  3451. /*
  3452. * Reset link and dcbx to down state since we will not get a link down
  3453. * event from the MFW but calling __qedf_remove will essentially be a
  3454. * link down event.
  3455. */
  3456. atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  3457. atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
  3458. __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
  3459. clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
  3460. QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
  3461. }
  3462. /* Generic TLV data callback */
  3463. void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
  3464. {
  3465. struct qedf_ctx *qedf;
  3466. if (!dev) {
  3467. QEDF_INFO(NULL, QEDF_LOG_EVT,
  3468. "dev is NULL so ignoring get_generic_tlv_data request.\n");
  3469. return;
  3470. }
  3471. qedf = (struct qedf_ctx *)dev;
  3472. memset(data, 0, sizeof(struct qed_generic_tlvs));
  3473. ether_addr_copy(data->mac[0], qedf->mac);
  3474. }
  3475. /*
  3476. * Module Init/Remove
  3477. */
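/*
 * Module init order (summary of qedf_init() below): debug mask and default
 * priority sanity checks, io_work kmem_cache, qed FCoE ops, debugfs, FC
 * transport templates (physical and vport), the qedf_io_wq workqueue, and
 * finally PCI driver registration. Failures unwind in reverse order through
 * the err1..err5 labels.
 */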
  3478. static int __init qedf_init(void)
  3479. {
  3480. int ret;
  3481. /* If debug=1 passed, set the default log mask */
  3482. if (qedf_debug == QEDF_LOG_DEFAULT)
  3483. qedf_debug = QEDF_DEFAULT_LOG_MASK;
  3484. /*
  3485. * Check that default prio for FIP/FCoE traffic is between 0..7 if a
  3486. * value has been set
  3487. */
  3488. if (qedf_default_prio > -1)
  3489. if (qedf_default_prio > 7) {
  3490. qedf_default_prio = QEDF_DEFAULT_PRIO;
  3491. QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
  3492. QEDF_DEFAULT_PRIO);
  3493. }
  3494. /* Print driver banner */
  3495. QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
  3496. QEDF_VERSION);
  3497. /* Create kmem_cache for qedf_io_work structs */
  3498. qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
  3499. sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
  3500. if (qedf_io_work_cache == NULL) {
  3501. QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
  3502. goto err1;
  3503. }
  3504. QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
  3505. qedf_io_work_cache);
  3506. qed_ops = qed_get_fcoe_ops();
  3507. if (!qed_ops) {
  3508. QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
  3509. goto err1;
  3510. }
  3511. #ifdef CONFIG_DEBUG_FS
  3512. qedf_dbg_init("qedf");
  3513. #endif
  3514. qedf_fc_transport_template =
  3515. fc_attach_transport(&qedf_fc_transport_fn);
  3516. if (!qedf_fc_transport_template) {
  3517. QEDF_ERR(NULL, "Could not register with FC transport\n");
  3518. goto err2;
  3519. }
  3520. qedf_fc_vport_transport_template =
  3521. fc_attach_transport(&qedf_fc_vport_transport_fn);
  3522. if (!qedf_fc_vport_transport_template) {
  3523. QEDF_ERR(NULL, "Could not register vport template with FC "
  3524. "transport\n");
  3525. goto err3;
  3526. }
  3527. qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
  3528. if (!qedf_io_wq) {
  3529. QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
  3530. goto err4;
  3531. }
  3532. qedf_cb_ops.get_login_failures = qedf_get_login_failures;
  3533. ret = pci_register_driver(&qedf_pci_driver);
  3534. if (ret) {
  3535. QEDF_ERR(NULL, "Failed to register driver\n");
  3536. goto err5;
  3537. }
  3538. return 0;
  3539. err5:
  3540. destroy_workqueue(qedf_io_wq);
  3541. err4:
  3542. fc_release_transport(qedf_fc_vport_transport_template);
  3543. err3:
  3544. fc_release_transport(qedf_fc_transport_template);
  3545. err2:
  3546. #ifdef CONFIG_DEBUG_FS
  3547. qedf_dbg_exit();
  3548. #endif
  3549. qed_put_fcoe_ops();
  3550. err1:
  3551. return -EINVAL;
  3552. }
  3553. static void __exit qedf_cleanup(void)
  3554. {
  3555. pci_unregister_driver(&qedf_pci_driver);
  3556. destroy_workqueue(qedf_io_wq);
  3557. fc_release_transport(qedf_fc_vport_transport_template);
  3558. fc_release_transport(qedf_fc_transport_template);
  3559. #ifdef CONFIG_DEBUG_FS
  3560. qedf_dbg_exit();
  3561. #endif
  3562. qed_put_fcoe_ops();
  3563. kmem_cache_destroy(qedf_io_work_cache);
  3564. }
  3565. MODULE_LICENSE("GPL");
  3566. MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
  3567. MODULE_AUTHOR("QLogic Corporation");
  3568. MODULE_VERSION(QEDF_VERSION);
  3569. module_init(qedf_init);
  3570. module_exit(qedf_cleanup);