qla_attr.c 87 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
2944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QLogic Fibre Channel HBA Driver
  4. * Copyright (c) 2003-2014 QLogic Corporation
  5. */
  6. #include "qla_def.h"
  7. #include "qla_target.h"
  8. #include <linux/kthread.h>
  9. #include <linux/vmalloc.h>
  10. #include <linux/slab.h>
  11. #include <linux/delay.h>
  12. static int qla24xx_vport_disable(struct fc_vport *, bool);
  13. /* SYSFS attributes --------------------------------------------------------- */
  14. static ssize_t
  15. qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
  16. struct bin_attribute *bin_attr,
  17. char *buf, loff_t off, size_t count)
  18. {
  19. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  20. struct device, kobj)));
  21. struct qla_hw_data *ha = vha->hw;
  22. int rval = 0;
  23. if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
  24. ha->mpi_fw_dump_reading))
  25. return 0;
  26. mutex_lock(&ha->optrom_mutex);
  27. if (IS_P3P_TYPE(ha)) {
  28. if (off < ha->md_template_size) {
  29. rval = memory_read_from_buffer(buf, count,
  30. &off, ha->md_tmplt_hdr, ha->md_template_size);
  31. } else {
  32. off -= ha->md_template_size;
  33. rval = memory_read_from_buffer(buf, count,
  34. &off, ha->md_dump, ha->md_dump_size);
  35. }
  36. } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
  37. rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
  38. MCTP_DUMP_SIZE);
  39. } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
  40. rval = memory_read_from_buffer(buf, count, &off,
  41. ha->mpi_fw_dump,
  42. ha->mpi_fw_dump_len);
  43. } else if (ha->fw_dump_reading) {
  44. rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
  45. ha->fw_dump_len);
  46. } else {
  47. rval = 0;
  48. }
  49. mutex_unlock(&ha->optrom_mutex);
  50. return rval;
  51. }
  52. static ssize_t
  53. qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
  54. struct bin_attribute *bin_attr,
  55. char *buf, loff_t off, size_t count)
  56. {
  57. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  58. struct device, kobj)));
  59. struct qla_hw_data *ha = vha->hw;
  60. int reading;
  61. if (off != 0)
  62. return (0);
  63. reading = simple_strtol(buf, NULL, 10);
  64. switch (reading) {
  65. case 0:
  66. if (!ha->fw_dump_reading)
  67. break;
  68. ql_log(ql_log_info, vha, 0x705d,
  69. "Firmware dump cleared on (%ld).\n", vha->host_no);
  70. if (IS_P3P_TYPE(ha)) {
  71. qla82xx_md_free(vha);
  72. qla82xx_md_prep(vha);
  73. }
  74. ha->fw_dump_reading = 0;
  75. ha->fw_dumped = false;
  76. break;
  77. case 1:
  78. if (ha->fw_dumped && !ha->fw_dump_reading) {
  79. ha->fw_dump_reading = 1;
  80. ql_log(ql_log_info, vha, 0x705e,
  81. "Raw firmware dump ready for read on (%ld).\n",
  82. vha->host_no);
  83. }
  84. break;
  85. case 2:
  86. qla2x00_alloc_fw_dump(vha);
  87. break;
  88. case 3:
  89. if (IS_QLA82XX(ha)) {
  90. qla82xx_idc_lock(ha);
  91. qla82xx_set_reset_owner(vha);
  92. qla82xx_idc_unlock(ha);
  93. } else if (IS_QLA8044(ha)) {
  94. qla8044_idc_lock(ha);
  95. qla82xx_set_reset_owner(vha);
  96. qla8044_idc_unlock(ha);
  97. } else {
  98. qla2x00_system_error(vha);
  99. }
  100. break;
  101. case 4:
  102. if (IS_P3P_TYPE(ha)) {
  103. if (ha->md_tmplt_hdr)
  104. ql_dbg(ql_dbg_user, vha, 0x705b,
  105. "MiniDump supported with this firmware.\n");
  106. else
  107. ql_dbg(ql_dbg_user, vha, 0x709d,
  108. "MiniDump not supported with this firmware.\n");
  109. }
  110. break;
  111. case 5:
  112. if (IS_P3P_TYPE(ha))
  113. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  114. break;
  115. case 6:
  116. if (!ha->mctp_dump_reading)
  117. break;
  118. ql_log(ql_log_info, vha, 0x70c1,
  119. "MCTP dump cleared on (%ld).\n", vha->host_no);
  120. ha->mctp_dump_reading = 0;
  121. ha->mctp_dumped = 0;
  122. break;
  123. case 7:
  124. if (ha->mctp_dumped && !ha->mctp_dump_reading) {
  125. ha->mctp_dump_reading = 1;
  126. ql_log(ql_log_info, vha, 0x70c2,
  127. "Raw mctp dump ready for read on (%ld).\n",
  128. vha->host_no);
  129. }
  130. break;
  131. case 8:
  132. if (!ha->mpi_fw_dump_reading)
  133. break;
  134. ql_log(ql_log_info, vha, 0x70e7,
  135. "MPI firmware dump cleared on (%ld).\n", vha->host_no);
  136. ha->mpi_fw_dump_reading = 0;
  137. ha->mpi_fw_dumped = 0;
  138. break;
  139. case 9:
  140. if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
  141. ha->mpi_fw_dump_reading = 1;
  142. ql_log(ql_log_info, vha, 0x70e8,
  143. "Raw MPI firmware dump ready for read on (%ld).\n",
  144. vha->host_no);
  145. }
  146. break;
  147. case 10:
  148. if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
  149. ql_log(ql_log_info, vha, 0x70e9,
  150. "Issuing MPI firmware dump on host#%ld.\n",
  151. vha->host_no);
  152. ha->isp_ops->mpi_fw_dump(vha, 0);
  153. }
  154. break;
  155. }
  156. return count;
  157. }
  158. static struct bin_attribute sysfs_fw_dump_attr = {
  159. .attr = {
  160. .name = "fw_dump",
  161. .mode = S_IRUSR | S_IWUSR,
  162. },
  163. .size = 0,
  164. .read = qla2x00_sysfs_read_fw_dump,
  165. .write = qla2x00_sysfs_write_fw_dump,
  166. };
  167. static ssize_t
  168. qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
  169. struct bin_attribute *bin_attr,
  170. char *buf, loff_t off, size_t count)
  171. {
  172. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  173. struct device, kobj)));
  174. struct qla_hw_data *ha = vha->hw;
  175. uint32_t faddr;
  176. struct active_regions active_regions = { };
  177. if (!capable(CAP_SYS_ADMIN))
  178. return 0;
  179. mutex_lock(&ha->optrom_mutex);
  180. if (qla2x00_chip_is_down(vha)) {
  181. mutex_unlock(&ha->optrom_mutex);
  182. return -EAGAIN;
  183. }
  184. if (!IS_NOCACHE_VPD_TYPE(ha)) {
  185. mutex_unlock(&ha->optrom_mutex);
  186. goto skip;
  187. }
  188. faddr = ha->flt_region_nvram;
  189. if (IS_QLA28XX(ha)) {
  190. qla28xx_get_aux_images(vha, &active_regions);
  191. if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
  192. faddr = ha->flt_region_nvram_sec;
  193. }
  194. ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
  195. mutex_unlock(&ha->optrom_mutex);
  196. skip:
  197. return memory_read_from_buffer(buf, count, &off, ha->nvram,
  198. ha->nvram_size);
  199. }
  200. static ssize_t
  201. qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
  202. struct bin_attribute *bin_attr,
  203. char *buf, loff_t off, size_t count)
  204. {
  205. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  206. struct device, kobj)));
  207. struct qla_hw_data *ha = vha->hw;
  208. uint16_t cnt;
  209. if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
  210. !ha->isp_ops->write_nvram)
  211. return -EINVAL;
  212. /* Checksum NVRAM. */
  213. if (IS_FWI2_CAPABLE(ha)) {
  214. __le32 *iter = (__force __le32 *)buf;
  215. uint32_t chksum;
  216. chksum = 0;
  217. for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
  218. chksum += le32_to_cpu(*iter);
  219. chksum = ~chksum + 1;
  220. *iter = cpu_to_le32(chksum);
  221. } else {
  222. uint8_t *iter;
  223. uint8_t chksum;
  224. iter = (uint8_t *)buf;
  225. chksum = 0;
  226. for (cnt = 0; cnt < count - 1; cnt++)
  227. chksum += *iter++;
  228. chksum = ~chksum + 1;
  229. *iter = chksum;
  230. }
  231. if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
  232. ql_log(ql_log_warn, vha, 0x705f,
  233. "HBA not online, failing NVRAM update.\n");
  234. return -EAGAIN;
  235. }
  236. mutex_lock(&ha->optrom_mutex);
  237. if (qla2x00_chip_is_down(vha)) {
  238. mutex_unlock(&ha->optrom_mutex);
  239. return -EAGAIN;
  240. }
  241. /* Write NVRAM. */
  242. ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
  243. ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
  244. count);
  245. mutex_unlock(&ha->optrom_mutex);
  246. ql_dbg(ql_dbg_user, vha, 0x7060,
  247. "Setting ISP_ABORT_NEEDED\n");
  248. /* NVRAM settings take effect immediately. */
  249. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  250. qla2xxx_wake_dpc(vha);
  251. qla2x00_wait_for_chip_reset(vha);
  252. return count;
  253. }
  254. static struct bin_attribute sysfs_nvram_attr = {
  255. .attr = {
  256. .name = "nvram",
  257. .mode = S_IRUSR | S_IWUSR,
  258. },
  259. .size = 512,
  260. .read = qla2x00_sysfs_read_nvram,
  261. .write = qla2x00_sysfs_write_nvram,
  262. };
  263. static ssize_t
  264. qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
  265. struct bin_attribute *bin_attr,
  266. char *buf, loff_t off, size_t count)
  267. {
  268. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  269. struct device, kobj)));
  270. struct qla_hw_data *ha = vha->hw;
  271. ssize_t rval = 0;
  272. mutex_lock(&ha->optrom_mutex);
  273. if (ha->optrom_state != QLA_SREADING)
  274. goto out;
  275. rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
  276. ha->optrom_region_size);
  277. out:
  278. mutex_unlock(&ha->optrom_mutex);
  279. return rval;
  280. }
  281. static ssize_t
  282. qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
  283. struct bin_attribute *bin_attr,
  284. char *buf, loff_t off, size_t count)
  285. {
  286. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  287. struct device, kobj)));
  288. struct qla_hw_data *ha = vha->hw;
  289. mutex_lock(&ha->optrom_mutex);
  290. if (ha->optrom_state != QLA_SWRITING) {
  291. mutex_unlock(&ha->optrom_mutex);
  292. return -EINVAL;
  293. }
  294. if (off > ha->optrom_region_size) {
  295. mutex_unlock(&ha->optrom_mutex);
  296. return -ERANGE;
  297. }
  298. if (off + count > ha->optrom_region_size)
  299. count = ha->optrom_region_size - off;
  300. memcpy(&ha->optrom_buffer[off], buf, count);
  301. mutex_unlock(&ha->optrom_mutex);
  302. return count;
  303. }
  304. static struct bin_attribute sysfs_optrom_attr = {
  305. .attr = {
  306. .name = "optrom",
  307. .mode = S_IRUSR | S_IWUSR,
  308. },
  309. .size = 0,
  310. .read = qla2x00_sysfs_read_optrom,
  311. .write = qla2x00_sysfs_write_optrom,
  312. };
  313. static ssize_t
  314. qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
  315. struct bin_attribute *bin_attr,
  316. char *buf, loff_t off, size_t count)
  317. {
  318. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  319. struct device, kobj)));
  320. struct qla_hw_data *ha = vha->hw;
  321. uint32_t start = 0;
  322. uint32_t size = ha->optrom_size;
  323. int val, valid;
  324. ssize_t rval = count;
  325. if (off)
  326. return -EINVAL;
  327. if (unlikely(pci_channel_offline(ha->pdev)))
  328. return -EAGAIN;
  329. if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
  330. return -EINVAL;
  331. if (start > ha->optrom_size)
  332. return -EINVAL;
  333. if (size > ha->optrom_size - start)
  334. size = ha->optrom_size - start;
  335. mutex_lock(&ha->optrom_mutex);
  336. if (qla2x00_chip_is_down(vha)) {
  337. mutex_unlock(&ha->optrom_mutex);
  338. return -EAGAIN;
  339. }
  340. switch (val) {
  341. case 0:
  342. if (ha->optrom_state != QLA_SREADING &&
  343. ha->optrom_state != QLA_SWRITING) {
  344. rval = -EINVAL;
  345. goto out;
  346. }
  347. ha->optrom_state = QLA_SWAITING;
  348. ql_dbg(ql_dbg_user, vha, 0x7061,
  349. "Freeing flash region allocation -- 0x%x bytes.\n",
  350. ha->optrom_region_size);
  351. vfree(ha->optrom_buffer);
  352. ha->optrom_buffer = NULL;
  353. break;
  354. case 1:
  355. if (ha->optrom_state != QLA_SWAITING) {
  356. rval = -EINVAL;
  357. goto out;
  358. }
  359. ha->optrom_region_start = start;
  360. ha->optrom_region_size = size;
  361. ha->optrom_state = QLA_SREADING;
  362. ha->optrom_buffer = vzalloc(ha->optrom_region_size);
  363. if (ha->optrom_buffer == NULL) {
  364. ql_log(ql_log_warn, vha, 0x7062,
  365. "Unable to allocate memory for optrom retrieval "
  366. "(%x).\n", ha->optrom_region_size);
  367. ha->optrom_state = QLA_SWAITING;
  368. rval = -ENOMEM;
  369. goto out;
  370. }
  371. if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
  372. ql_log(ql_log_warn, vha, 0x7063,
  373. "HBA not online, failing NVRAM update.\n");
  374. rval = -EAGAIN;
  375. goto out;
  376. }
  377. ql_dbg(ql_dbg_user, vha, 0x7064,
  378. "Reading flash region -- 0x%x/0x%x.\n",
  379. ha->optrom_region_start, ha->optrom_region_size);
  380. ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
  381. ha->optrom_region_start, ha->optrom_region_size);
  382. break;
  383. case 2:
  384. if (ha->optrom_state != QLA_SWAITING) {
  385. rval = -EINVAL;
  386. goto out;
  387. }
  388. /*
  389. * We need to be more restrictive on which FLASH regions are
  390. * allowed to be updated via user-space. Regions accessible
  391. * via this method include:
  392. *
  393. * ISP21xx/ISP22xx/ISP23xx type boards:
  394. *
  395. * 0x000000 -> 0x020000 -- Boot code.
  396. *
  397. * ISP2322/ISP24xx type boards:
  398. *
  399. * 0x000000 -> 0x07ffff -- Boot code.
  400. * 0x080000 -> 0x0fffff -- Firmware.
  401. *
  402. * ISP25xx type boards:
  403. *
  404. * 0x000000 -> 0x07ffff -- Boot code.
  405. * 0x080000 -> 0x0fffff -- Firmware.
  406. * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
  407. *
  408. * > ISP25xx type boards:
  409. *
  410. * None -- should go through BSG.
  411. */
  412. valid = 0;
  413. if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
  414. valid = 1;
  415. else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
  416. valid = 1;
  417. if (!valid) {
  418. ql_log(ql_log_warn, vha, 0x7065,
  419. "Invalid start region 0x%x/0x%x.\n", start, size);
  420. rval = -EINVAL;
  421. goto out;
  422. }
  423. ha->optrom_region_start = start;
  424. ha->optrom_region_size = size;
  425. ha->optrom_state = QLA_SWRITING;
  426. ha->optrom_buffer = vzalloc(ha->optrom_region_size);
  427. if (ha->optrom_buffer == NULL) {
  428. ql_log(ql_log_warn, vha, 0x7066,
  429. "Unable to allocate memory for optrom update "
  430. "(%x)\n", ha->optrom_region_size);
  431. ha->optrom_state = QLA_SWAITING;
  432. rval = -ENOMEM;
  433. goto out;
  434. }
  435. ql_dbg(ql_dbg_user, vha, 0x7067,
  436. "Staging flash region write -- 0x%x/0x%x.\n",
  437. ha->optrom_region_start, ha->optrom_region_size);
  438. break;
  439. case 3:
  440. if (ha->optrom_state != QLA_SWRITING) {
  441. rval = -EINVAL;
  442. goto out;
  443. }
  444. if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
  445. ql_log(ql_log_warn, vha, 0x7068,
  446. "HBA not online, failing flash update.\n");
  447. rval = -EAGAIN;
  448. goto out;
  449. }
  450. ql_dbg(ql_dbg_user, vha, 0x7069,
  451. "Writing flash region -- 0x%x/0x%x.\n",
  452. ha->optrom_region_start, ha->optrom_region_size);
  453. rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
  454. ha->optrom_region_start, ha->optrom_region_size);
  455. if (rval)
  456. rval = -EIO;
  457. break;
  458. default:
  459. rval = -EINVAL;
  460. }
  461. out:
  462. mutex_unlock(&ha->optrom_mutex);
  463. return rval;
  464. }
  465. static struct bin_attribute sysfs_optrom_ctl_attr = {
  466. .attr = {
  467. .name = "optrom_ctl",
  468. .mode = S_IWUSR,
  469. },
  470. .size = 0,
  471. .write = qla2x00_sysfs_write_optrom_ctl,
  472. };
  473. static ssize_t
  474. qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
  475. struct bin_attribute *bin_attr,
  476. char *buf, loff_t off, size_t count)
  477. {
  478. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  479. struct device, kobj)));
  480. struct qla_hw_data *ha = vha->hw;
  481. uint32_t faddr;
  482. struct active_regions active_regions = { };
  483. if (unlikely(pci_channel_offline(ha->pdev)))
  484. return -EAGAIN;
  485. if (!capable(CAP_SYS_ADMIN))
  486. return -EINVAL;
  487. if (!IS_NOCACHE_VPD_TYPE(ha))
  488. goto skip;
  489. faddr = ha->flt_region_vpd << 2;
  490. if (IS_QLA28XX(ha)) {
  491. qla28xx_get_aux_images(vha, &active_regions);
  492. if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
  493. faddr = ha->flt_region_vpd_sec << 2;
  494. ql_dbg(ql_dbg_init, vha, 0x7070,
  495. "Loading %s nvram image.\n",
  496. active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
  497. "primary" : "secondary");
  498. }
  499. mutex_lock(&ha->optrom_mutex);
  500. if (qla2x00_chip_is_down(vha)) {
  501. mutex_unlock(&ha->optrom_mutex);
  502. return -EAGAIN;
  503. }
  504. ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
  505. mutex_unlock(&ha->optrom_mutex);
  506. ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
  507. skip:
  508. return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
  509. }
  510. static ssize_t
  511. qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
  512. struct bin_attribute *bin_attr,
  513. char *buf, loff_t off, size_t count)
  514. {
  515. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  516. struct device, kobj)));
  517. struct qla_hw_data *ha = vha->hw;
  518. uint8_t *tmp_data;
  519. if (unlikely(pci_channel_offline(ha->pdev)))
  520. return 0;
  521. if (qla2x00_chip_is_down(vha))
  522. return 0;
  523. if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
  524. !ha->isp_ops->write_nvram)
  525. return 0;
  526. if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
  527. ql_log(ql_log_warn, vha, 0x706a,
  528. "HBA not online, failing VPD update.\n");
  529. return -EAGAIN;
  530. }
  531. mutex_lock(&ha->optrom_mutex);
  532. if (qla2x00_chip_is_down(vha)) {
  533. mutex_unlock(&ha->optrom_mutex);
  534. return -EAGAIN;
  535. }
  536. /* Write NVRAM. */
  537. ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
  538. ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
  539. /* Update flash version information for 4Gb & above. */
  540. if (!IS_FWI2_CAPABLE(ha)) {
  541. mutex_unlock(&ha->optrom_mutex);
  542. return -EINVAL;
  543. }
  544. tmp_data = vmalloc(256);
  545. if (!tmp_data) {
  546. mutex_unlock(&ha->optrom_mutex);
  547. ql_log(ql_log_warn, vha, 0x706b,
  548. "Unable to allocate memory for VPD information update.\n");
  549. return -ENOMEM;
  550. }
  551. ha->isp_ops->get_flash_version(vha, tmp_data);
  552. vfree(tmp_data);
  553. mutex_unlock(&ha->optrom_mutex);
  554. return count;
  555. }
  556. static struct bin_attribute sysfs_vpd_attr = {
  557. .attr = {
  558. .name = "vpd",
  559. .mode = S_IRUSR | S_IWUSR,
  560. },
  561. .size = 0,
  562. .read = qla2x00_sysfs_read_vpd,
  563. .write = qla2x00_sysfs_write_vpd,
  564. };
  565. static ssize_t
  566. qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
  567. struct bin_attribute *bin_attr,
  568. char *buf, loff_t off, size_t count)
  569. {
  570. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  571. struct device, kobj)));
  572. int rval;
  573. if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
  574. return 0;
  575. mutex_lock(&vha->hw->optrom_mutex);
  576. if (qla2x00_chip_is_down(vha)) {
  577. mutex_unlock(&vha->hw->optrom_mutex);
  578. return 0;
  579. }
  580. rval = qla2x00_read_sfp_dev(vha, buf, count);
  581. mutex_unlock(&vha->hw->optrom_mutex);
  582. if (rval)
  583. return -EIO;
  584. return count;
  585. }
  586. static struct bin_attribute sysfs_sfp_attr = {
  587. .attr = {
  588. .name = "sfp",
  589. .mode = S_IRUSR | S_IWUSR,
  590. },
  591. .size = SFP_DEV_SIZE,
  592. .read = qla2x00_sysfs_read_sfp,
  593. };
  594. static ssize_t
  595. qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
  596. struct bin_attribute *bin_attr,
  597. char *buf, loff_t off, size_t count)
  598. {
  599. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  600. struct device, kobj)));
  601. struct qla_hw_data *ha = vha->hw;
  602. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  603. int type;
  604. uint32_t idc_control;
  605. uint8_t *tmp_data = NULL;
  606. if (off != 0)
  607. return -EINVAL;
  608. type = simple_strtol(buf, NULL, 10);
  609. switch (type) {
  610. case 0x2025c:
  611. ql_log(ql_log_info, vha, 0x706e,
  612. "Issuing ISP reset.\n");
  613. if (vha->hw->flags.port_isolated) {
  614. ql_log(ql_log_info, vha, 0x706e,
  615. "Port is isolated, returning.\n");
  616. return -EINVAL;
  617. }
  618. scsi_block_requests(vha->host);
  619. if (IS_QLA82XX(ha)) {
  620. ha->flags.isp82xx_no_md_cap = 1;
  621. qla82xx_idc_lock(ha);
  622. qla82xx_set_reset_owner(vha);
  623. qla82xx_idc_unlock(ha);
  624. } else if (IS_QLA8044(ha)) {
  625. qla8044_idc_lock(ha);
  626. idc_control = qla8044_rd_reg(ha,
  627. QLA8044_IDC_DRV_CTRL);
  628. qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
  629. (idc_control | GRACEFUL_RESET_BIT1));
  630. qla82xx_set_reset_owner(vha);
  631. qla8044_idc_unlock(ha);
  632. } else {
  633. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  634. qla2xxx_wake_dpc(vha);
  635. }
  636. qla2x00_wait_for_chip_reset(vha);
  637. scsi_unblock_requests(vha->host);
  638. break;
  639. case 0x2025d:
  640. if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
  641. !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  642. return -EPERM;
  643. ql_log(ql_log_info, vha, 0x706f,
  644. "Issuing MPI reset.\n");
  645. if (IS_QLA83XX(ha)) {
  646. uint32_t idc_control;
  647. qla83xx_idc_lock(vha, 0);
  648. __qla83xx_get_idc_control(vha, &idc_control);
  649. idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
  650. __qla83xx_set_idc_control(vha, idc_control);
  651. qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
  652. QLA8XXX_DEV_NEED_RESET);
  653. qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
  654. qla83xx_idc_unlock(vha, 0);
  655. break;
  656. } else {
  657. /* Make sure FC side is not in reset */
  658. WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
  659. QLA_SUCCESS);
  660. /* Issue MPI reset */
  661. scsi_block_requests(vha->host);
  662. if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
  663. ql_log(ql_log_warn, vha, 0x7070,
  664. "MPI reset failed.\n");
  665. scsi_unblock_requests(vha->host);
  666. break;
  667. }
  668. break;
  669. case 0x2025e:
  670. if (!IS_P3P_TYPE(ha) || vha != base_vha) {
  671. ql_log(ql_log_info, vha, 0x7071,
  672. "FCoE ctx reset not supported.\n");
  673. return -EPERM;
  674. }
  675. ql_log(ql_log_info, vha, 0x7072,
  676. "Issuing FCoE ctx reset.\n");
  677. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  678. qla2xxx_wake_dpc(vha);
  679. qla2x00_wait_for_fcoe_ctx_reset(vha);
  680. break;
  681. case 0x2025f:
  682. if (!IS_QLA8031(ha))
  683. return -EPERM;
  684. ql_log(ql_log_info, vha, 0x70bc,
  685. "Disabling Reset by IDC control\n");
  686. qla83xx_idc_lock(vha, 0);
  687. __qla83xx_get_idc_control(vha, &idc_control);
  688. idc_control |= QLA83XX_IDC_RESET_DISABLED;
  689. __qla83xx_set_idc_control(vha, idc_control);
  690. qla83xx_idc_unlock(vha, 0);
  691. break;
  692. case 0x20260:
  693. if (!IS_QLA8031(ha))
  694. return -EPERM;
  695. ql_log(ql_log_info, vha, 0x70bd,
  696. "Enabling Reset by IDC control\n");
  697. qla83xx_idc_lock(vha, 0);
  698. __qla83xx_get_idc_control(vha, &idc_control);
  699. idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
  700. __qla83xx_set_idc_control(vha, idc_control);
  701. qla83xx_idc_unlock(vha, 0);
  702. break;
  703. case 0x20261:
  704. ql_dbg(ql_dbg_user, vha, 0x70e0,
  705. "Updating cache versions without reset ");
  706. tmp_data = vmalloc(256);
  707. if (!tmp_data) {
  708. ql_log(ql_log_warn, vha, 0x70e1,
  709. "Unable to allocate memory for VPD information update.\n");
  710. return -ENOMEM;
  711. }
  712. ha->isp_ops->get_flash_version(vha, tmp_data);
  713. vfree(tmp_data);
  714. break;
  715. }
  716. return count;
  717. }
  718. static struct bin_attribute sysfs_reset_attr = {
  719. .attr = {
  720. .name = "reset",
  721. .mode = S_IWUSR,
  722. },
  723. .size = 0,
  724. .write = qla2x00_sysfs_write_reset,
  725. };
  726. static ssize_t
  727. qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
  728. struct bin_attribute *bin_attr,
  729. char *buf, loff_t off, size_t count)
  730. {
  731. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  732. struct device, kobj)));
  733. int type;
  734. port_id_t did;
  735. if (!capable(CAP_SYS_ADMIN))
  736. return 0;
  737. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  738. return 0;
  739. if (qla2x00_chip_is_down(vha))
  740. return 0;
  741. type = simple_strtol(buf, NULL, 10);
  742. did.b.domain = (type & 0x00ff0000) >> 16;
  743. did.b.area = (type & 0x0000ff00) >> 8;
  744. did.b.al_pa = (type & 0x000000ff);
  745. ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
  746. did.b.domain, did.b.area, did.b.al_pa);
  747. ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
  748. qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
  749. return count;
  750. }
  751. static struct bin_attribute sysfs_issue_logo_attr = {
  752. .attr = {
  753. .name = "issue_logo",
  754. .mode = S_IWUSR,
  755. },
  756. .size = 0,
  757. .write = qla2x00_issue_logo,
  758. };
  759. static ssize_t
  760. qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
  761. struct bin_attribute *bin_attr,
  762. char *buf, loff_t off, size_t count)
  763. {
  764. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  765. struct device, kobj)));
  766. struct qla_hw_data *ha = vha->hw;
  767. int rval;
  768. uint16_t actual_size;
  769. if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
  770. return 0;
  771. if (unlikely(pci_channel_offline(ha->pdev)))
  772. return 0;
  773. mutex_lock(&vha->hw->optrom_mutex);
  774. if (qla2x00_chip_is_down(vha)) {
  775. mutex_unlock(&vha->hw->optrom_mutex);
  776. return 0;
  777. }
  778. if (ha->xgmac_data)
  779. goto do_read;
  780. ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
  781. &ha->xgmac_data_dma, GFP_KERNEL);
  782. if (!ha->xgmac_data) {
  783. mutex_unlock(&vha->hw->optrom_mutex);
  784. ql_log(ql_log_warn, vha, 0x7076,
  785. "Unable to allocate memory for XGMAC read-data.\n");
  786. return 0;
  787. }
  788. do_read:
  789. actual_size = 0;
  790. memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
  791. rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
  792. XGMAC_DATA_SIZE, &actual_size);
  793. mutex_unlock(&vha->hw->optrom_mutex);
  794. if (rval != QLA_SUCCESS) {
  795. ql_log(ql_log_warn, vha, 0x7077,
  796. "Unable to read XGMAC data (%x).\n", rval);
  797. count = 0;
  798. }
  799. count = actual_size > count ? count : actual_size;
  800. memcpy(buf, ha->xgmac_data, count);
  801. return count;
  802. }
  803. static struct bin_attribute sysfs_xgmac_stats_attr = {
  804. .attr = {
  805. .name = "xgmac_stats",
  806. .mode = S_IRUSR,
  807. },
  808. .size = 0,
  809. .read = qla2x00_sysfs_read_xgmac_stats,
  810. };
  811. static ssize_t
  812. qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
  813. struct bin_attribute *bin_attr,
  814. char *buf, loff_t off, size_t count)
  815. {
  816. struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
  817. struct device, kobj)));
  818. struct qla_hw_data *ha = vha->hw;
  819. int rval;
  820. if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
  821. return 0;
  822. mutex_lock(&vha->hw->optrom_mutex);
  823. if (ha->dcbx_tlv)
  824. goto do_read;
  825. if (qla2x00_chip_is_down(vha)) {
  826. mutex_unlock(&vha->hw->optrom_mutex);
  827. return 0;
  828. }
  829. ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
  830. &ha->dcbx_tlv_dma, GFP_KERNEL);
  831. if (!ha->dcbx_tlv) {
  832. mutex_unlock(&vha->hw->optrom_mutex);
  833. ql_log(ql_log_warn, vha, 0x7078,
  834. "Unable to allocate memory for DCBX TLV read-data.\n");
  835. return -ENOMEM;
  836. }
  837. do_read:
  838. memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
  839. rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
  840. DCBX_TLV_DATA_SIZE);
  841. mutex_unlock(&vha->hw->optrom_mutex);
  842. if (rval != QLA_SUCCESS) {
  843. ql_log(ql_log_warn, vha, 0x7079,
  844. "Unable to read DCBX TLV (%x).\n", rval);
  845. return -EIO;
  846. }
  847. memcpy(buf, ha->dcbx_tlv, count);
  848. return count;
  849. }
  850. static struct bin_attribute sysfs_dcbx_tlv_attr = {
  851. .attr = {
  852. .name = "dcbx_tlv",
  853. .mode = S_IRUSR,
  854. },
  855. .size = 0,
  856. .read = qla2x00_sysfs_read_dcbx_tlv,
  857. };
  858. static struct sysfs_entry {
  859. char *name;
  860. struct bin_attribute *attr;
  861. int type;
  862. } bin_file_entries[] = {
  863. { "fw_dump", &sysfs_fw_dump_attr, },
  864. { "nvram", &sysfs_nvram_attr, },
  865. { "optrom", &sysfs_optrom_attr, },
  866. { "optrom_ctl", &sysfs_optrom_ctl_attr, },
  867. { "vpd", &sysfs_vpd_attr, 1 },
  868. { "sfp", &sysfs_sfp_attr, 1 },
  869. { "reset", &sysfs_reset_attr, },
  870. { "issue_logo", &sysfs_issue_logo_attr, },
  871. { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
  872. { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
  873. { NULL },
  874. };
  875. void
  876. qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
  877. {
  878. struct Scsi_Host *host = vha->host;
  879. struct sysfs_entry *iter;
  880. int ret;
  881. for (iter = bin_file_entries; iter->name; iter++) {
  882. if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
  883. continue;
  884. if (iter->type == 2 && !IS_QLA25XX(vha->hw))
  885. continue;
  886. if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
  887. continue;
  888. ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
  889. iter->attr);
  890. if (ret)
  891. ql_log(ql_log_warn, vha, 0x00f3,
  892. "Unable to create sysfs %s binary attribute (%d).\n",
  893. iter->name, ret);
  894. else
  895. ql_dbg(ql_dbg_init, vha, 0x00f4,
  896. "Successfully created sysfs %s binary attribute.\n",
  897. iter->name);
  898. }
  899. }
  900. void
  901. qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
  902. {
  903. struct Scsi_Host *host = vha->host;
  904. struct sysfs_entry *iter;
  905. struct qla_hw_data *ha = vha->hw;
  906. for (iter = bin_file_entries; iter->name; iter++) {
  907. if (iter->type && !IS_FWI2_CAPABLE(ha))
  908. continue;
  909. if (iter->type == 2 && !IS_QLA25XX(ha))
  910. continue;
  911. if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
  912. continue;
  913. sysfs_remove_bin_file(&host->shost_gendev.kobj,
  914. iter->attr);
  915. }
  916. if (stop_beacon && ha->beacon_blink_led == 1)
  917. ha->isp_ops->beacon_off(vha);
  918. }
  919. /* Scsi_Host attributes. */
  920. static ssize_t
  921. qla2x00_fw_version_show(struct device *dev,
  922. struct device_attribute *attr, char *buf)
  923. {
  924. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  925. struct qla_hw_data *ha = vha->hw;
  926. char fw_str[128];
  927. return scnprintf(buf, PAGE_SIZE, "%s\n",
  928. ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
  929. }
  930. static ssize_t
  931. qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
  932. char *buf)
  933. {
  934. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  935. struct qla_hw_data *ha = vha->hw;
  936. uint32_t sn;
  937. if (IS_QLAFX00(vha->hw)) {
  938. return scnprintf(buf, PAGE_SIZE, "%s\n",
  939. vha->hw->mr.serial_num);
  940. } else if (IS_FWI2_CAPABLE(ha)) {
  941. qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
  942. return strlen(strcat(buf, "\n"));
  943. }
  944. sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
  945. return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
  946. sn % 100000);
  947. }
  948. static ssize_t
  949. qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
  950. char *buf)
  951. {
  952. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  953. return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
  954. }
  955. static ssize_t
  956. qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
  957. char *buf)
  958. {
  959. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  960. struct qla_hw_data *ha = vha->hw;
  961. if (IS_QLAFX00(vha->hw))
  962. return scnprintf(buf, PAGE_SIZE, "%s\n",
  963. vha->hw->mr.hw_version);
  964. return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
  965. ha->product_id[0], ha->product_id[1], ha->product_id[2],
  966. ha->product_id[3]);
  967. }
  968. static ssize_t
  969. qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
  970. char *buf)
  971. {
  972. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  973. return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
  974. }
  975. static ssize_t
  976. qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
  977. char *buf)
  978. {
  979. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  980. return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
  981. }
  982. static ssize_t
  983. qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
  984. char *buf)
  985. {
  986. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  987. char pci_info[30];
  988. return scnprintf(buf, PAGE_SIZE, "%s\n",
  989. vha->hw->isp_ops->pci_info_str(vha, pci_info,
  990. sizeof(pci_info)));
  991. }
  992. static ssize_t
  993. qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
  994. char *buf)
  995. {
  996. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  997. struct qla_hw_data *ha = vha->hw;
  998. int len = 0;
  999. if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
  1000. atomic_read(&vha->loop_state) == LOOP_DEAD ||
  1001. vha->device_flags & DFLG_NO_CABLE)
  1002. len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
  1003. else if (atomic_read(&vha->loop_state) != LOOP_READY ||
  1004. qla2x00_chip_is_down(vha))
  1005. len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
  1006. else {
  1007. len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
  1008. switch (ha->current_topology) {
  1009. case ISP_CFG_NL:
  1010. len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
  1011. break;
  1012. case ISP_CFG_FL:
  1013. len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
  1014. break;
  1015. case ISP_CFG_N:
  1016. len += scnprintf(buf + len, PAGE_SIZE-len,
  1017. "N_Port to N_Port\n");
  1018. break;
  1019. case ISP_CFG_F:
  1020. len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
  1021. break;
  1022. default:
  1023. len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
  1024. break;
  1025. }
  1026. }
  1027. return len;
  1028. }
  1029. static ssize_t
  1030. qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
  1031. char *buf)
  1032. {
  1033. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1034. int len = 0;
  1035. switch (vha->hw->zio_mode) {
  1036. case QLA_ZIO_MODE_6:
  1037. len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
  1038. break;
  1039. case QLA_ZIO_DISABLED:
  1040. len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
  1041. break;
  1042. }
  1043. return len;
  1044. }
  1045. static ssize_t
  1046. qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
  1047. const char *buf, size_t count)
  1048. {
  1049. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1050. struct qla_hw_data *ha = vha->hw;
  1051. int val = 0;
  1052. uint16_t zio_mode;
  1053. if (!IS_ZIO_SUPPORTED(ha))
  1054. return -ENOTSUPP;
  1055. if (sscanf(buf, "%d", &val) != 1)
  1056. return -EINVAL;
  1057. if (val)
  1058. zio_mode = QLA_ZIO_MODE_6;
  1059. else
  1060. zio_mode = QLA_ZIO_DISABLED;
  1061. /* Update per-hba values and queue a reset. */
  1062. if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
  1063. ha->zio_mode = zio_mode;
  1064. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1065. }
  1066. return strlen(buf);
  1067. }
  1068. static ssize_t
  1069. qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
  1070. char *buf)
  1071. {
  1072. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1073. return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
  1074. }
  1075. static ssize_t
  1076. qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
  1077. const char *buf, size_t count)
  1078. {
  1079. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1080. int val = 0;
  1081. uint16_t zio_timer;
  1082. if (sscanf(buf, "%d", &val) != 1)
  1083. return -EINVAL;
  1084. if (val > 25500 || val < 100)
  1085. return -ERANGE;
  1086. zio_timer = (uint16_t)(val / 100);
  1087. vha->hw->zio_timer = zio_timer;
  1088. return strlen(buf);
  1089. }
  1090. static ssize_t
  1091. qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
  1092. char *buf)
  1093. {
  1094. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1095. return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
  1096. vha->hw->last_zio_threshold);
  1097. }
  1098. static ssize_t
  1099. qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
  1100. const char *buf, size_t count)
  1101. {
  1102. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1103. int val = 0;
  1104. if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
  1105. return -EINVAL;
  1106. if (sscanf(buf, "%d", &val) != 1)
  1107. return -EINVAL;
  1108. if (val < 0 || val > 256)
  1109. return -ERANGE;
  1110. atomic_set(&vha->hw->zio_threshold, val);
  1111. return strlen(buf);
  1112. }
  1113. static ssize_t
  1114. qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
  1115. char *buf)
  1116. {
  1117. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1118. int len = 0;
  1119. if (vha->hw->beacon_blink_led)
  1120. len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
  1121. else
  1122. len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
  1123. return len;
  1124. }
  1125. static ssize_t
  1126. qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
  1127. const char *buf, size_t count)
  1128. {
  1129. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1130. struct qla_hw_data *ha = vha->hw;
  1131. int val = 0;
  1132. int rval;
  1133. if (IS_QLA2100(ha) || IS_QLA2200(ha))
  1134. return -EPERM;
  1135. if (sscanf(buf, "%d", &val) != 1)
  1136. return -EINVAL;
  1137. mutex_lock(&vha->hw->optrom_mutex);
  1138. if (qla2x00_chip_is_down(vha)) {
  1139. mutex_unlock(&vha->hw->optrom_mutex);
  1140. ql_log(ql_log_warn, vha, 0x707a,
  1141. "Abort ISP active -- ignoring beacon request.\n");
  1142. return -EBUSY;
  1143. }
  1144. if (val)
  1145. rval = ha->isp_ops->beacon_on(vha);
  1146. else
  1147. rval = ha->isp_ops->beacon_off(vha);
  1148. if (rval != QLA_SUCCESS)
  1149. count = 0;
  1150. mutex_unlock(&vha->hw->optrom_mutex);
  1151. return count;
  1152. }
  1153. static ssize_t
  1154. qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
  1155. char *buf)
  1156. {
  1157. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1158. struct qla_hw_data *ha = vha->hw;
  1159. uint16_t led[3] = { 0 };
  1160. if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1161. return -EPERM;
  1162. if (ql26xx_led_config(vha, 0, led))
  1163. return scnprintf(buf, PAGE_SIZE, "\n");
  1164. return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
  1165. led[0], led[1], led[2]);
  1166. }
  1167. static ssize_t
  1168. qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
  1169. const char *buf, size_t count)
  1170. {
  1171. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1172. struct qla_hw_data *ha = vha->hw;
  1173. uint16_t options = BIT_0;
  1174. uint16_t led[3] = { 0 };
  1175. uint16_t word[4];
  1176. int n;
  1177. if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1178. return -EPERM;
  1179. n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
  1180. if (n == 4) {
  1181. if (word[0] == 3) {
  1182. options |= BIT_3|BIT_2|BIT_1;
  1183. led[0] = word[1];
  1184. led[1] = word[2];
  1185. led[2] = word[3];
  1186. goto write;
  1187. }
  1188. return -EINVAL;
  1189. }
  1190. if (n == 2) {
  1191. /* check led index */
  1192. if (word[0] == 0) {
  1193. options |= BIT_2;
  1194. led[0] = word[1];
  1195. goto write;
  1196. }
  1197. if (word[0] == 1) {
  1198. options |= BIT_3;
  1199. led[1] = word[1];
  1200. goto write;
  1201. }
  1202. if (word[0] == 2) {
  1203. options |= BIT_1;
  1204. led[2] = word[1];
  1205. goto write;
  1206. }
  1207. return -EINVAL;
  1208. }
  1209. return -EINVAL;
  1210. write:
  1211. if (ql26xx_led_config(vha, options, led))
  1212. return -EFAULT;
  1213. return count;
  1214. }
  1215. static ssize_t
  1216. qla2x00_optrom_bios_version_show(struct device *dev,
  1217. struct device_attribute *attr, char *buf)
  1218. {
  1219. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1220. struct qla_hw_data *ha = vha->hw;
  1221. return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
  1222. ha->bios_revision[0]);
  1223. }
  1224. static ssize_t
  1225. qla2x00_optrom_efi_version_show(struct device *dev,
  1226. struct device_attribute *attr, char *buf)
  1227. {
  1228. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1229. struct qla_hw_data *ha = vha->hw;
  1230. return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
  1231. ha->efi_revision[0]);
  1232. }
  1233. static ssize_t
  1234. qla2x00_optrom_fcode_version_show(struct device *dev,
  1235. struct device_attribute *attr, char *buf)
  1236. {
  1237. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1238. struct qla_hw_data *ha = vha->hw;
  1239. return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
  1240. ha->fcode_revision[0]);
  1241. }
  1242. static ssize_t
  1243. qla2x00_optrom_fw_version_show(struct device *dev,
  1244. struct device_attribute *attr, char *buf)
  1245. {
  1246. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1247. struct qla_hw_data *ha = vha->hw;
  1248. return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
  1249. ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
  1250. ha->fw_revision[3]);
  1251. }
  1252. static ssize_t
  1253. qla2x00_optrom_gold_fw_version_show(struct device *dev,
  1254. struct device_attribute *attr, char *buf)
  1255. {
  1256. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1257. struct qla_hw_data *ha = vha->hw;
  1258. if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
  1259. !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1260. return scnprintf(buf, PAGE_SIZE, "\n");
  1261. return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
  1262. ha->gold_fw_version[0], ha->gold_fw_version[1],
  1263. ha->gold_fw_version[2], ha->gold_fw_version[3]);
  1264. }
  1265. static ssize_t
  1266. qla2x00_total_isp_aborts_show(struct device *dev,
  1267. struct device_attribute *attr, char *buf)
  1268. {
  1269. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1270. return scnprintf(buf, PAGE_SIZE, "%d\n",
  1271. vha->qla_stats.total_isp_aborts);
  1272. }
  1273. static ssize_t
  1274. qla24xx_84xx_fw_version_show(struct device *dev,
  1275. struct device_attribute *attr, char *buf)
  1276. {
  1277. int rval = QLA_SUCCESS;
  1278. uint16_t status[2] = { 0 };
  1279. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1280. struct qla_hw_data *ha = vha->hw;
  1281. if (!IS_QLA84XX(ha))
  1282. return scnprintf(buf, PAGE_SIZE, "\n");
  1283. if (!ha->cs84xx->op_fw_version) {
  1284. rval = qla84xx_verify_chip(vha, status);
  1285. if (!rval && !status[0])
  1286. return scnprintf(buf, PAGE_SIZE, "%u\n",
  1287. (uint32_t)ha->cs84xx->op_fw_version);
  1288. }
  1289. return scnprintf(buf, PAGE_SIZE, "\n");
  1290. }
  1291. static ssize_t
  1292. qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
  1293. char *buf)
  1294. {
  1295. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1296. struct qla_hw_data *ha = vha->hw;
  1297. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1298. return scnprintf(buf, PAGE_SIZE, "\n");
  1299. return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
  1300. ha->serdes_version[0], ha->serdes_version[1],
  1301. ha->serdes_version[2]);
  1302. }
  1303. static ssize_t
  1304. qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
  1305. char *buf)
  1306. {
  1307. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1308. struct qla_hw_data *ha = vha->hw;
  1309. if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
  1310. !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1311. return scnprintf(buf, PAGE_SIZE, "\n");
  1312. return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
  1313. ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
  1314. ha->mpi_capabilities);
  1315. }
  1316. static ssize_t
  1317. qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
  1318. char *buf)
  1319. {
  1320. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1321. struct qla_hw_data *ha = vha->hw;
  1322. if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
  1323. return scnprintf(buf, PAGE_SIZE, "\n");
  1324. return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
  1325. ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
  1326. }
  1327. static ssize_t
  1328. qla2x00_flash_block_size_show(struct device *dev,
  1329. struct device_attribute *attr, char *buf)
  1330. {
  1331. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1332. struct qla_hw_data *ha = vha->hw;
  1333. return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
  1334. }
  1335. static ssize_t
  1336. qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
  1337. char *buf)
  1338. {
  1339. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1340. if (!IS_CNA_CAPABLE(vha->hw))
  1341. return scnprintf(buf, PAGE_SIZE, "\n");
  1342. return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
  1343. }
  1344. static ssize_t
  1345. qla2x00_vn_port_mac_address_show(struct device *dev,
  1346. struct device_attribute *attr, char *buf)
  1347. {
  1348. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1349. if (!IS_CNA_CAPABLE(vha->hw))
  1350. return scnprintf(buf, PAGE_SIZE, "\n");
  1351. return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
  1352. }
  1353. static ssize_t
  1354. qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
  1355. char *buf)
  1356. {
  1357. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1358. return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
  1359. }
  1360. static ssize_t
  1361. qla2x00_thermal_temp_show(struct device *dev,
  1362. struct device_attribute *attr, char *buf)
  1363. {
  1364. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1365. uint16_t temp = 0;
  1366. int rc;
  1367. mutex_lock(&vha->hw->optrom_mutex);
  1368. if (qla2x00_chip_is_down(vha)) {
  1369. mutex_unlock(&vha->hw->optrom_mutex);
  1370. ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
  1371. goto done;
  1372. }
  1373. if (vha->hw->flags.eeh_busy) {
  1374. mutex_unlock(&vha->hw->optrom_mutex);
  1375. ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
  1376. goto done;
  1377. }
  1378. rc = qla2x00_get_thermal_temp(vha, &temp);
  1379. mutex_unlock(&vha->hw->optrom_mutex);
  1380. if (rc == QLA_SUCCESS)
  1381. return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
  1382. done:
  1383. return scnprintf(buf, PAGE_SIZE, "\n");
  1384. }
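/*
 * sysfs "fw_state" reader.  FX00 adapters report a single state word from
 * qlafx00_fw_state_show(); other ISPs report the six firmware state words
 * returned by qla2x00_get_firmware_state(), read under optrom_mutex.
 */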
  1385. static ssize_t
  1386. qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
  1387. char *buf)
  1388. {
  1389. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1390. int rval = QLA_FUNCTION_FAILED;
  1391. uint16_t state[6];
  1392. uint32_t pstate;
  1393. if (IS_QLAFX00(vha->hw)) {
  1394. pstate = qlafx00_fw_state_show(dev, attr, buf);
  1395. return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
  1396. }
  1397. mutex_lock(&vha->hw->optrom_mutex);
  1398. if (qla2x00_chip_is_down(vha)) {
  1399. mutex_unlock(&vha->hw->optrom_mutex);
  1400. ql_log(ql_log_warn, vha, 0x707c,
  1401. "ISP reset active.\n");
  1402. goto out;
  1403. } else if (vha->hw->flags.eeh_busy) {
  1404. mutex_unlock(&vha->hw->optrom_mutex);
  1405. goto out;
  1406. }
  1407. rval = qla2x00_get_firmware_state(vha, state);
  1408. mutex_unlock(&vha->hw->optrom_mutex);
  1409. out:
  1410. if (rval != QLA_SUCCESS) {
  1411. memset(state, -1, sizeof(state));
  1412. rval = qla2x00_get_firmware_state(vha, state);
  1413. }
  1414. return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
  1415. state[0], state[1], state[2], state[3], state[4], state[5]);
  1416. }
  1417. static ssize_t
  1418. qla2x00_diag_requests_show(struct device *dev,
  1419. struct device_attribute *attr, char *buf)
  1420. {
  1421. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1422. if (!IS_BIDI_CAPABLE(vha->hw))
  1423. return scnprintf(buf, PAGE_SIZE, "\n");
  1424. return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
  1425. }
  1426. static ssize_t
  1427. qla2x00_diag_megabytes_show(struct device *dev,
  1428. struct device_attribute *attr, char *buf)
  1429. {
  1430. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1431. if (!IS_BIDI_CAPABLE(vha->hw))
  1432. return scnprintf(buf, PAGE_SIZE, "\n");
  1433. return scnprintf(buf, PAGE_SIZE, "%llu\n",
  1434. vha->bidi_stats.transfer_bytes >> 20);
  1435. }
  1436. static ssize_t
  1437. qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
  1438. char *buf)
  1439. {
  1440. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1441. struct qla_hw_data *ha = vha->hw;
  1442. uint32_t size;
  1443. if (!ha->fw_dumped)
  1444. size = 0;
  1445. else if (IS_P3P_TYPE(ha))
  1446. size = ha->md_template_size + ha->md_dump_size;
  1447. else
  1448. size = ha->fw_dump_len;
  1449. return scnprintf(buf, PAGE_SIZE, "%d\n", size);
  1450. }
  1451. static ssize_t
  1452. qla2x00_allow_cna_fw_dump_show(struct device *dev,
  1453. struct device_attribute *attr, char *buf)
  1454. {
  1455. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1456. if (!IS_P3P_TYPE(vha->hw))
  1457. return scnprintf(buf, PAGE_SIZE, "\n");
  1458. else
  1459. return scnprintf(buf, PAGE_SIZE, "%s\n",
  1460. vha->hw->allow_cna_fw_dump ? "true" : "false");
  1461. }
  1462. static ssize_t
  1463. qla2x00_allow_cna_fw_dump_store(struct device *dev,
  1464. struct device_attribute *attr, const char *buf, size_t count)
  1465. {
  1466. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1467. int val = 0;
  1468. if (!IS_P3P_TYPE(vha->hw))
  1469. return -EINVAL;
  1470. if (sscanf(buf, "%d", &val) != 1)
  1471. return -EINVAL;
  1472. vha->hw->allow_cna_fw_dump = val != 0;
  1473. return strlen(buf);
  1474. }
  1475. static ssize_t
  1476. qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
  1477. char *buf)
  1478. {
  1479. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1480. struct qla_hw_data *ha = vha->hw;
  1481. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1482. return scnprintf(buf, PAGE_SIZE, "\n");
  1483. return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
  1484. ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
  1485. }
  1486. static ssize_t
  1487. qla2x00_min_supported_speed_show(struct device *dev,
  1488. struct device_attribute *attr, char *buf)
  1489. {
  1490. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1491. struct qla_hw_data *ha = vha->hw;
  1492. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1493. return scnprintf(buf, PAGE_SIZE, "\n");
  1494. return scnprintf(buf, PAGE_SIZE, "%s\n",
1495. ha->min_supported_speed == 6 ? "64Gbps" :
1496. ha->min_supported_speed == 5 ? "32Gbps" :
1497. ha->min_supported_speed == 4 ? "16Gbps" :
1498. ha->min_supported_speed == 3 ? "8Gbps" :
1499. ha->min_supported_speed == 2 ? "4Gbps" :
1500. ha->min_supported_speed != 0 ? "unknown" : "");
  1501. }
  1502. static ssize_t
  1503. qla2x00_max_supported_speed_show(struct device *dev,
  1504. struct device_attribute *attr, char *buf)
  1505. {
  1506. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1507. struct qla_hw_data *ha = vha->hw;
  1508. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  1509. return scnprintf(buf, PAGE_SIZE, "\n");
  1510. return scnprintf(buf, PAGE_SIZE, "%s\n",
1511. ha->max_supported_speed == 2 ? "64Gbps" :
1512. ha->max_supported_speed == 1 ? "32Gbps" :
1513. ha->max_supported_speed == 0 ? "16Gbps" : "unknown");
  1514. }
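/*
 * sysfs "port_speed" writer.  Accepts 0 (auto), 4, 8, 16 or 32; writing the
 * value multiplied by ten (40/80/160/320) selects the same rate but defers
 * the change until the next loss of sync (QLA_SET_DATA_RATE_NOLR).
 */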
  1515. static ssize_t
  1516. qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
  1517. const char *buf, size_t count)
  1518. {
  1519. struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
  1520. ulong type, speed;
  1521. int oldspeed, rval;
  1522. int mode = QLA_SET_DATA_RATE_LR;
  1523. struct qla_hw_data *ha = vha->hw;
  1524. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
  1525. ql_log(ql_log_warn, vha, 0x70d8,
  1526. "Speed setting not supported \n");
  1527. return -EINVAL;
  1528. }
  1529. rval = kstrtol(buf, 10, &type);
  1530. if (rval)
  1531. return rval;
  1532. speed = type;
  1533. if (type == 40 || type == 80 || type == 160 ||
  1534. type == 320) {
  1535. ql_dbg(ql_dbg_user, vha, 0x70d9,
  1536. "Setting will be affected after a loss of sync\n");
  1537. type = type/10;
  1538. mode = QLA_SET_DATA_RATE_NOLR;
  1539. }
  1540. oldspeed = ha->set_data_rate;
  1541. switch (type) {
  1542. case 0:
  1543. ha->set_data_rate = PORT_SPEED_AUTO;
  1544. break;
  1545. case 4:
  1546. ha->set_data_rate = PORT_SPEED_4GB;
  1547. break;
  1548. case 8:
  1549. ha->set_data_rate = PORT_SPEED_8GB;
  1550. break;
  1551. case 16:
  1552. ha->set_data_rate = PORT_SPEED_16GB;
  1553. break;
  1554. case 32:
  1555. ha->set_data_rate = PORT_SPEED_32GB;
  1556. break;
  1557. default:
  1558. ql_log(ql_log_warn, vha, 0x1199,
  1559. "Unrecognized speed setting:%lx. Setting Autoneg\n",
  1560. speed);
  1561. ha->set_data_rate = PORT_SPEED_AUTO;
  1562. }
  1563. if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
  1564. return -EINVAL;
  1565. ql_log(ql_log_info, vha, 0x70da,
  1566. "Setting speed to %lx Gbps \n", type);
  1567. rval = qla2x00_set_data_rate(vha, mode);
  1568. if (rval != QLA_SUCCESS)
  1569. return -EIO;
  1570. return strlen(buf);
  1571. }
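/* Table used by qla2x00_port_speed_show() to translate the firmware
 * link_data_rate value into the speed string exposed through sysfs. */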
  1572. static const struct {
  1573. u16 rate;
  1574. char *str;
  1575. } port_speed_str[] = {
  1576. { PORT_SPEED_4GB, "4" },
  1577. { PORT_SPEED_8GB, "8" },
  1578. { PORT_SPEED_16GB, "16" },
  1579. { PORT_SPEED_32GB, "32" },
  1580. { PORT_SPEED_64GB, "64" },
  1581. { PORT_SPEED_10GB, "10" },
  1582. };
  1583. static ssize_t
  1584. qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
  1585. char *buf)
  1586. {
  1587. struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
  1588. struct qla_hw_data *ha = vha->hw;
  1589. ssize_t rval;
  1590. u16 i;
  1591. char *speed = "Unknown";
  1592. rval = qla2x00_get_data_rate(vha);
  1593. if (rval != QLA_SUCCESS) {
  1594. ql_log(ql_log_warn, vha, 0x70db,
  1595. "Unable to get port speed rval:%zd\n", rval);
  1596. return -EINVAL;
  1597. }
  1598. for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) {
  1599. if (port_speed_str[i].rate != ha->link_data_rate)
  1600. continue;
  1601. speed = port_speed_str[i].str;
  1602. break;
  1603. }
  1604. return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
  1605. }
  1606. static ssize_t
  1607. qla2x00_mpi_pause_store(struct device *dev,
  1608. struct device_attribute *attr, const char *buf, size_t count)
  1609. {
  1610. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1611. int rval = 0;
  1612. if (sscanf(buf, "%d", &rval) != 1)
  1613. return -EINVAL;
  1614. ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n");
  1615. rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001);
  1616. if (rval != QLA_SUCCESS) {
  1617. ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n");
  1618. count = 0;
  1619. }
  1620. return count;
  1621. }
  1622. static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store);
  1623. /* ----- */
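/*
 * qlini_mode: initiator-mode policy control.  The _show handler below lists
 * the supported selections and the current one; the matching _store handler
 * passes the requested mode to qla_set_ini_mode().
 */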
  1624. static ssize_t
  1625. qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
  1626. {
  1627. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1628. int len = 0;
  1629. len += scnprintf(buf + len, PAGE_SIZE-len,
  1630. "Supported options: enabled | disabled | dual | exclusive\n");
  1631. /* --- */
  1632. len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
  1633. switch (vha->qlini_mode) {
  1634. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1635. len += scnprintf(buf + len, PAGE_SIZE-len,
  1636. QLA2XXX_INI_MODE_STR_EXCLUSIVE);
  1637. break;
  1638. case QLA2XXX_INI_MODE_DISABLED:
  1639. len += scnprintf(buf + len, PAGE_SIZE-len,
  1640. QLA2XXX_INI_MODE_STR_DISABLED);
  1641. break;
  1642. case QLA2XXX_INI_MODE_ENABLED:
  1643. len += scnprintf(buf + len, PAGE_SIZE-len,
  1644. QLA2XXX_INI_MODE_STR_ENABLED);
  1645. break;
  1646. case QLA2XXX_INI_MODE_DUAL:
  1647. len += scnprintf(buf + len, PAGE_SIZE-len,
  1648. QLA2XXX_INI_MODE_STR_DUAL);
  1649. break;
  1650. }
  1651. len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
  1652. return len;
  1653. }
  1654. static char *mode_to_str[] = {
  1655. "exclusive",
  1656. "disabled",
  1657. "enabled",
  1658. "dual",
  1659. };
  1660. #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
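/*
 * Decide how to honour a requested qlini_mode change.  The nested switches
 * compare the current and requested modes and the (possibly re-tuned)
 * exchange-offload counts, then either accept the change (scheduling an ISP
 * abort to apply it), record it without further action, or reject it while
 * target mode is still active.
 */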
  1661. static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
  1662. {
  1663. enum {
  1664. NO_ACTION,
  1665. MODE_CHANGE_ACCEPT,
  1666. MODE_CHANGE_NO_ACTION,
  1667. TARGET_STILL_ACTIVE,
  1668. };
  1669. int action = NO_ACTION;
  1670. int set_mode = 0;
  1671. u8 eo_toggle = 0; /* exchange offload flipped */
  1672. switch (vha->qlini_mode) {
  1673. case QLA2XXX_INI_MODE_DISABLED:
  1674. switch (op) {
  1675. case QLA2XXX_INI_MODE_DISABLED:
  1676. if (qla_tgt_mode_enabled(vha)) {
  1677. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1678. vha->hw->flags.exchoffld_enabled)
  1679. eo_toggle = 1;
  1680. if (((vha->ql2xexchoffld !=
  1681. vha->u_ql2xexchoffld) &&
  1682. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1683. eo_toggle) {
  1684. /*
1685. * The number of exchanges to be offloaded
1686. * was tweaked or the offload option was
1687. * flipped.
  1688. */
  1689. action = MODE_CHANGE_ACCEPT;
  1690. } else {
  1691. action = MODE_CHANGE_NO_ACTION;
  1692. }
  1693. } else {
  1694. action = MODE_CHANGE_NO_ACTION;
  1695. }
  1696. break;
  1697. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1698. if (qla_tgt_mode_enabled(vha)) {
  1699. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1700. vha->hw->flags.exchoffld_enabled)
  1701. eo_toggle = 1;
  1702. if (((vha->ql2xexchoffld !=
  1703. vha->u_ql2xexchoffld) &&
  1704. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1705. eo_toggle) {
  1706. /*
1707. * The number of exchanges to be offloaded
1708. * was tweaked or the offload option was
1709. * flipped.
  1710. */
  1711. action = MODE_CHANGE_ACCEPT;
  1712. } else {
  1713. action = MODE_CHANGE_NO_ACTION;
  1714. }
  1715. } else {
  1716. action = MODE_CHANGE_ACCEPT;
  1717. }
  1718. break;
  1719. case QLA2XXX_INI_MODE_DUAL:
  1720. action = MODE_CHANGE_ACCEPT;
  1721. /* active_mode is target only, reset it to dual */
  1722. if (qla_tgt_mode_enabled(vha)) {
  1723. set_mode = 1;
  1724. action = MODE_CHANGE_ACCEPT;
  1725. } else {
  1726. action = MODE_CHANGE_NO_ACTION;
  1727. }
  1728. break;
  1729. case QLA2XXX_INI_MODE_ENABLED:
  1730. if (qla_tgt_mode_enabled(vha))
  1731. action = TARGET_STILL_ACTIVE;
  1732. else {
  1733. action = MODE_CHANGE_ACCEPT;
  1734. set_mode = 1;
  1735. }
  1736. break;
  1737. }
  1738. break;
  1739. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1740. switch (op) {
  1741. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1742. if (qla_tgt_mode_enabled(vha)) {
  1743. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1744. vha->hw->flags.exchoffld_enabled)
  1745. eo_toggle = 1;
  1746. if (((vha->ql2xexchoffld !=
  1747. vha->u_ql2xexchoffld) &&
  1748. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1749. eo_toggle)
  1750. /*
1751. * The number of exchanges to be offloaded
1752. * was tweaked or the offload option was
1753. * flipped.
  1754. */
  1755. action = MODE_CHANGE_ACCEPT;
  1756. else
  1757. action = NO_ACTION;
  1758. } else
  1759. action = NO_ACTION;
  1760. break;
  1761. case QLA2XXX_INI_MODE_DISABLED:
  1762. if (qla_tgt_mode_enabled(vha)) {
  1763. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
  1764. vha->hw->flags.exchoffld_enabled)
  1765. eo_toggle = 1;
  1766. if (((vha->ql2xexchoffld !=
  1767. vha->u_ql2xexchoffld) &&
  1768. NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
  1769. eo_toggle)
  1770. action = MODE_CHANGE_ACCEPT;
  1771. else
  1772. action = MODE_CHANGE_NO_ACTION;
  1773. } else
  1774. action = MODE_CHANGE_NO_ACTION;
  1775. break;
  1776. case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
  1777. if (qla_tgt_mode_enabled(vha)) {
  1778. action = MODE_CHANGE_ACCEPT;
  1779. set_mode = 1;
  1780. } else
  1781. action = MODE_CHANGE_ACCEPT;
  1782. break;
  1783. case QLA2XXX_INI_MODE_ENABLED:
  1784. if (qla_tgt_mode_enabled(vha))
  1785. action = TARGET_STILL_ACTIVE;
  1786. else {
  1787. if (vha->hw->flags.fw_started)
  1788. action = MODE_CHANGE_NO_ACTION;
  1789. else
  1790. action = MODE_CHANGE_ACCEPT;
  1791. }
  1792. break;
  1793. }
  1794. break;
  1795. case QLA2XXX_INI_MODE_ENABLED:
  1796. switch (op) {
  1797. case QLA2XXX_INI_MODE_ENABLED:
  1798. if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
  1799. vha->hw->flags.exchoffld_enabled)
  1800. eo_toggle = 1;
  1801. if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
  1802. NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
  1803. eo_toggle)
  1804. action = MODE_CHANGE_ACCEPT;
  1805. else
  1806. action = NO_ACTION;
  1807. break;
  1808. case QLA2XXX_INI_MODE_DUAL:
  1809. case QLA2XXX_INI_MODE_DISABLED:
  1810. action = MODE_CHANGE_ACCEPT;
  1811. break;
  1812. default:
  1813. action = MODE_CHANGE_NO_ACTION;
  1814. break;
  1815. }
  1816. break;
  1817. case QLA2XXX_INI_MODE_DUAL:
  1818. switch (op) {
  1819. case QLA2XXX_INI_MODE_DUAL:
  1820. if (qla_tgt_mode_enabled(vha) ||
  1821. qla_dual_mode_enabled(vha)) {
  1822. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
  1823. vha->u_ql2xiniexchg) !=
  1824. vha->hw->flags.exchoffld_enabled)
  1825. eo_toggle = 1;
  1826. if ((((vha->ql2xexchoffld +
  1827. vha->ql2xiniexchg) !=
  1828. (vha->u_ql2xiniexchg +
  1829. vha->u_ql2xexchoffld)) &&
  1830. NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
  1831. vha->u_ql2xexchoffld)) || eo_toggle)
  1832. action = MODE_CHANGE_ACCEPT;
  1833. else
  1834. action = NO_ACTION;
  1835. } else {
  1836. if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
  1837. vha->u_ql2xiniexchg) !=
  1838. vha->hw->flags.exchoffld_enabled)
  1839. eo_toggle = 1;
  1840. if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
  1841. != (vha->u_ql2xiniexchg +
  1842. vha->u_ql2xexchoffld)) &&
  1843. NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
  1844. vha->u_ql2xexchoffld)) || eo_toggle)
  1845. action = MODE_CHANGE_NO_ACTION;
  1846. else
  1847. action = NO_ACTION;
  1848. }
  1849. break;
  1850. case QLA2XXX_INI_MODE_DISABLED:
  1851. if (qla_tgt_mode_enabled(vha) ||
  1852. qla_dual_mode_enabled(vha)) {
  1853. /* turning off initiator mode */
  1854. set_mode = 1;
  1855. action = MODE_CHANGE_ACCEPT;
  1856. } else {
  1857. action = MODE_CHANGE_NO_ACTION;
  1858. }
  1859. break;
  1860. case QLA2XXX_INI_MODE_EXCLUSIVE:
  1861. if (qla_tgt_mode_enabled(vha) ||
  1862. qla_dual_mode_enabled(vha)) {
  1863. set_mode = 1;
  1864. action = MODE_CHANGE_ACCEPT;
  1865. } else {
  1866. action = MODE_CHANGE_ACCEPT;
  1867. }
  1868. break;
  1869. case QLA2XXX_INI_MODE_ENABLED:
  1870. if (qla_tgt_mode_enabled(vha) ||
  1871. qla_dual_mode_enabled(vha)) {
  1872. action = TARGET_STILL_ACTIVE;
  1873. } else {
  1874. action = MODE_CHANGE_ACCEPT;
  1875. }
  1876. }
  1877. break;
  1878. }
  1879. switch (action) {
  1880. case MODE_CHANGE_ACCEPT:
  1881. ql_log(ql_log_warn, vha, 0xffff,
  1882. "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
  1883. mode_to_str[vha->qlini_mode], mode_to_str[op],
  1884. vha->ql2xexchoffld, vha->u_ql2xexchoffld,
  1885. vha->ql2xiniexchg, vha->u_ql2xiniexchg);
  1886. vha->qlini_mode = op;
  1887. vha->ql2xexchoffld = vha->u_ql2xexchoffld;
  1888. vha->ql2xiniexchg = vha->u_ql2xiniexchg;
  1889. if (set_mode)
  1890. qlt_set_mode(vha);
  1891. vha->flags.online = 1;
  1892. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1893. break;
  1894. case MODE_CHANGE_NO_ACTION:
  1895. ql_log(ql_log_warn, vha, 0xffff,
  1896. "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
  1897. mode_to_str[vha->qlini_mode], mode_to_str[op],
  1898. vha->ql2xexchoffld, vha->u_ql2xexchoffld,
  1899. vha->ql2xiniexchg, vha->u_ql2xiniexchg);
  1900. vha->qlini_mode = op;
  1901. vha->ql2xexchoffld = vha->u_ql2xexchoffld;
  1902. vha->ql2xiniexchg = vha->u_ql2xiniexchg;
  1903. break;
  1904. case TARGET_STILL_ACTIVE:
  1905. ql_log(ql_log_warn, vha, 0xffff,
  1906. "Target Mode is active. Unable to change Mode.\n");
  1907. break;
  1908. case NO_ACTION:
  1909. default:
  1910. ql_log(ql_log_warn, vha, 0xffff,
  1911. "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
  1912. vha->qlini_mode, op,
  1913. vha->ql2xexchoffld, vha->u_ql2xexchoffld);
  1914. break;
  1915. }
  1916. }
  1917. static ssize_t
  1918. qlini_mode_store(struct device *dev, struct device_attribute *attr,
  1919. const char *buf, size_t count)
  1920. {
  1921. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1922. int ini;
  1923. if (!buf)
  1924. return -EINVAL;
  1925. if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
  1926. strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
  1927. ini = QLA2XXX_INI_MODE_EXCLUSIVE;
  1928. else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
  1929. strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
  1930. ini = QLA2XXX_INI_MODE_DISABLED;
  1931. else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
  1932. strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
  1933. ini = QLA2XXX_INI_MODE_ENABLED;
  1934. else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
  1935. strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
  1936. ini = QLA2XXX_INI_MODE_DUAL;
  1937. else
  1938. return -EINVAL;
  1939. qla_set_ini_mode(vha, ini);
  1940. return strlen(buf);
  1941. }
  1942. static ssize_t
  1943. ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
  1944. char *buf)
  1945. {
  1946. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1947. int len = 0;
  1948. len += scnprintf(buf + len, PAGE_SIZE-len,
  1949. "target exchange: new %d : current: %d\n\n",
  1950. vha->u_ql2xexchoffld, vha->ql2xexchoffld);
  1951. len += scnprintf(buf + len, PAGE_SIZE-len,
  1952. "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
  1953. vha->host_no);
  1954. return len;
  1955. }
  1956. static ssize_t
  1957. ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
  1958. const char *buf, size_t count)
  1959. {
  1960. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1961. int val = 0;
  1962. if (sscanf(buf, "%d", &val) != 1)
  1963. return -EINVAL;
  1964. if (val > FW_MAX_EXCHANGES_CNT)
  1965. val = FW_MAX_EXCHANGES_CNT;
  1966. else if (val < 0)
  1967. val = 0;
  1968. vha->u_ql2xexchoffld = val;
  1969. return strlen(buf);
  1970. }
  1971. static ssize_t
  1972. ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
  1973. char *buf)
  1974. {
  1975. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1976. int len = 0;
  1977. len += scnprintf(buf + len, PAGE_SIZE-len,
  1978. "target exchange: new %d : current: %d\n\n",
  1979. vha->u_ql2xiniexchg, vha->ql2xiniexchg);
  1980. len += scnprintf(buf + len, PAGE_SIZE-len,
  1981. "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
  1982. vha->host_no);
  1983. return len;
  1984. }
  1985. static ssize_t
  1986. ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
  1987. const char *buf, size_t count)
  1988. {
  1989. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1990. int val = 0;
  1991. if (sscanf(buf, "%d", &val) != 1)
  1992. return -EINVAL;
  1993. if (val > FW_MAX_EXCHANGES_CNT)
  1994. val = FW_MAX_EXCHANGES_CNT;
  1995. else if (val < 0)
  1996. val = 0;
  1997. vha->u_ql2xiniexchg = val;
  1998. return strlen(buf);
  1999. }
  2000. static ssize_t
  2001. qla2x00_dif_bundle_statistics_show(struct device *dev,
  2002. struct device_attribute *attr, char *buf)
  2003. {
  2004. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2005. struct qla_hw_data *ha = vha->hw;
  2006. return scnprintf(buf, PAGE_SIZE,
  2007. "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
  2008. ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
  2009. ha->dif_bundle_writes, ha->dif_bundle_kallocs,
  2010. ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
  2011. }
  2012. static ssize_t
  2013. qla2x00_fw_attr_show(struct device *dev,
  2014. struct device_attribute *attr, char *buf)
  2015. {
  2016. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2017. struct qla_hw_data *ha = vha->hw;
  2018. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  2019. return scnprintf(buf, PAGE_SIZE, "\n");
  2020. return scnprintf(buf, PAGE_SIZE, "%llx\n",
  2021. (uint64_t)ha->fw_attributes_ext[1] << 48 |
  2022. (uint64_t)ha->fw_attributes_ext[0] << 32 |
  2023. (uint64_t)ha->fw_attributes_h << 16 |
  2024. (uint64_t)ha->fw_attributes);
  2025. }
  2026. static ssize_t
  2027. qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
  2028. char *buf)
  2029. {
  2030. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2031. return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
  2032. }
  2033. static ssize_t
  2034. qla2x00_dport_diagnostics_show(struct device *dev,
  2035. struct device_attribute *attr, char *buf)
  2036. {
  2037. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  2038. if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
  2039. !IS_QLA28XX(vha->hw))
  2040. return scnprintf(buf, PAGE_SIZE, "\n");
  2041. if (!*vha->dport_data)
  2042. return scnprintf(buf, PAGE_SIZE, "\n");
  2043. return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
  2044. vha->dport_data[0], vha->dport_data[1],
  2045. vha->dport_data[2], vha->dport_data[3]);
  2046. }
  2047. static DEVICE_ATTR(dport_diagnostics, 0444,
  2048. qla2x00_dport_diagnostics_show, NULL);
  2049. static DEVICE_STRING_ATTR_RO(driver_version, S_IRUGO, qla2x00_version_str);
  2050. static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
  2051. static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
  2052. static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
  2053. static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
  2054. static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
  2055. static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
  2056. static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
  2057. static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
  2058. static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
  2059. static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
  2060. qla2x00_zio_timer_store);
  2061. static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
  2062. qla2x00_beacon_store);
  2063. static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
  2064. qla2x00_beacon_config_store);
  2065. static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
  2066. qla2x00_optrom_bios_version_show, NULL);
  2067. static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
  2068. qla2x00_optrom_efi_version_show, NULL);
  2069. static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
  2070. qla2x00_optrom_fcode_version_show, NULL);
  2071. static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
  2072. NULL);
  2073. static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
  2074. qla2x00_optrom_gold_fw_version_show, NULL);
  2075. static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
  2076. NULL);
  2077. static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
  2078. NULL);
  2079. static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
  2080. static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
  2081. static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
  2082. static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
  2083. NULL);
  2084. static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
  2085. static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
  2086. qla2x00_vn_port_mac_address_show, NULL);
  2087. static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
  2088. static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
  2089. static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
  2090. static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
  2091. static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
  2092. static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
  2093. static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
  2094. qla2x00_allow_cna_fw_dump_show,
  2095. qla2x00_allow_cna_fw_dump_store);
  2096. static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
  2097. static DEVICE_ATTR(min_supported_speed, 0444,
  2098. qla2x00_min_supported_speed_show, NULL);
  2099. static DEVICE_ATTR(max_supported_speed, 0444,
  2100. qla2x00_max_supported_speed_show, NULL);
  2101. static DEVICE_ATTR(zio_threshold, 0644,
  2102. qla_zio_threshold_show,
  2103. qla_zio_threshold_store);
  2104. static DEVICE_ATTR_RW(qlini_mode);
  2105. static DEVICE_ATTR_RW(ql2xexchoffld);
  2106. static DEVICE_ATTR_RW(ql2xiniexchg);
  2107. static DEVICE_ATTR(dif_bundle_statistics, 0444,
  2108. qla2x00_dif_bundle_statistics_show, NULL);
  2109. static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
  2110. qla2x00_port_speed_store);
  2111. static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
  2112. static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
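/* Every attribute declared above is collected here and exported under
 * /sys/class/scsi_host/host<n>/ via qla2x00_host_groups. */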
  2113. static struct attribute *qla2x00_host_attrs[] = {
  2114. &dev_attr_driver_version.attr.attr,
  2115. &dev_attr_fw_version.attr,
  2116. &dev_attr_serial_num.attr,
  2117. &dev_attr_isp_name.attr,
  2118. &dev_attr_isp_id.attr,
  2119. &dev_attr_model_name.attr,
  2120. &dev_attr_model_desc.attr,
  2121. &dev_attr_pci_info.attr,
  2122. &dev_attr_link_state.attr,
  2123. &dev_attr_zio.attr,
  2124. &dev_attr_zio_timer.attr,
  2125. &dev_attr_beacon.attr,
  2126. &dev_attr_beacon_config.attr,
  2127. &dev_attr_optrom_bios_version.attr,
  2128. &dev_attr_optrom_efi_version.attr,
  2129. &dev_attr_optrom_fcode_version.attr,
  2130. &dev_attr_optrom_fw_version.attr,
  2131. &dev_attr_84xx_fw_version.attr,
  2132. &dev_attr_total_isp_aborts.attr,
  2133. &dev_attr_serdes_version.attr,
  2134. &dev_attr_mpi_version.attr,
  2135. &dev_attr_phy_version.attr,
  2136. &dev_attr_flash_block_size.attr,
  2137. &dev_attr_vlan_id.attr,
  2138. &dev_attr_vn_port_mac_address.attr,
  2139. &dev_attr_fabric_param.attr,
  2140. &dev_attr_fw_state.attr,
  2141. &dev_attr_optrom_gold_fw_version.attr,
  2142. &dev_attr_thermal_temp.attr,
  2143. &dev_attr_diag_requests.attr,
  2144. &dev_attr_diag_megabytes.attr,
  2145. &dev_attr_fw_dump_size.attr,
  2146. &dev_attr_allow_cna_fw_dump.attr,
  2147. &dev_attr_pep_version.attr,
  2148. &dev_attr_min_supported_speed.attr,
  2149. &dev_attr_max_supported_speed.attr,
  2150. &dev_attr_zio_threshold.attr,
  2151. &dev_attr_dif_bundle_statistics.attr,
  2152. &dev_attr_port_speed.attr,
  2153. &dev_attr_port_no.attr,
  2154. &dev_attr_fw_attr.attr,
  2155. &dev_attr_dport_diagnostics.attr,
  2156. &dev_attr_mpi_pause.attr,
  2157. &dev_attr_qlini_mode.attr,
  2158. &dev_attr_ql2xiniexchg.attr,
  2159. &dev_attr_ql2xexchoffld.attr,
  2160. NULL,
  2161. };
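/* Hide the initiator/target tuning attributes (qlini_mode, ql2xiniexchg,
 * ql2xexchoffld) unless the driver was loaded with ql2x_ini_mode set to
 * dual mode. */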
  2162. static umode_t qla_host_attr_is_visible(struct kobject *kobj,
  2163. struct attribute *attr, int i)
  2164. {
  2165. if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL &&
  2166. (attr == &dev_attr_qlini_mode.attr ||
  2167. attr == &dev_attr_ql2xiniexchg.attr ||
  2168. attr == &dev_attr_ql2xexchoffld.attr))
  2169. return 0;
  2170. return attr->mode;
  2171. }
  2172. static const struct attribute_group qla2x00_host_attr_group = {
  2173. .is_visible = qla_host_attr_is_visible,
  2174. .attrs = qla2x00_host_attrs
  2175. };
  2176. const struct attribute_group *qla2x00_host_groups[] = {
  2177. &qla2x00_host_attr_group,
  2178. NULL
  2179. };
  2180. /* Host attributes. */
  2181. static void
  2182. qla2x00_get_host_port_id(struct Scsi_Host *shost)
  2183. {
  2184. scsi_qla_host_t *vha = shost_priv(shost);
  2185. fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
  2186. vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
  2187. }
  2188. static void
  2189. qla2x00_get_host_speed(struct Scsi_Host *shost)
  2190. {
  2191. scsi_qla_host_t *vha = shost_priv(shost);
  2192. u32 speed;
  2193. if (IS_QLAFX00(vha->hw)) {
  2194. qlafx00_get_host_speed(shost);
  2195. return;
  2196. }
  2197. switch (vha->hw->link_data_rate) {
  2198. case PORT_SPEED_1GB:
  2199. speed = FC_PORTSPEED_1GBIT;
  2200. break;
  2201. case PORT_SPEED_2GB:
  2202. speed = FC_PORTSPEED_2GBIT;
  2203. break;
  2204. case PORT_SPEED_4GB:
  2205. speed = FC_PORTSPEED_4GBIT;
  2206. break;
  2207. case PORT_SPEED_8GB:
  2208. speed = FC_PORTSPEED_8GBIT;
  2209. break;
  2210. case PORT_SPEED_10GB:
  2211. speed = FC_PORTSPEED_10GBIT;
  2212. break;
  2213. case PORT_SPEED_16GB:
  2214. speed = FC_PORTSPEED_16GBIT;
  2215. break;
  2216. case PORT_SPEED_32GB:
  2217. speed = FC_PORTSPEED_32GBIT;
  2218. break;
  2219. case PORT_SPEED_64GB:
  2220. speed = FC_PORTSPEED_64GBIT;
  2221. break;
  2222. default:
  2223. speed = FC_PORTSPEED_UNKNOWN;
  2224. break;
  2225. }
  2226. fc_host_speed(shost) = speed;
  2227. }
  2228. static void
  2229. qla2x00_get_host_port_type(struct Scsi_Host *shost)
  2230. {
  2231. scsi_qla_host_t *vha = shost_priv(shost);
  2232. uint32_t port_type;
  2233. if (vha->vp_idx) {
  2234. fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
  2235. return;
  2236. }
  2237. switch (vha->hw->current_topology) {
  2238. case ISP_CFG_NL:
  2239. port_type = FC_PORTTYPE_LPORT;
  2240. break;
  2241. case ISP_CFG_FL:
  2242. port_type = FC_PORTTYPE_NLPORT;
  2243. break;
  2244. case ISP_CFG_N:
  2245. port_type = FC_PORTTYPE_PTP;
  2246. break;
  2247. case ISP_CFG_F:
  2248. port_type = FC_PORTTYPE_NPORT;
  2249. break;
  2250. default:
  2251. port_type = FC_PORTTYPE_UNKNOWN;
  2252. break;
  2253. }
  2254. fc_host_port_type(shost) = port_type;
  2255. }
  2256. static void
  2257. qla2x00_get_starget_node_name(struct scsi_target *starget)
  2258. {
  2259. struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
  2260. scsi_qla_host_t *vha = shost_priv(host);
  2261. fc_port_t *fcport;
  2262. u64 node_name = 0;
  2263. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  2264. if (fcport->rport &&
  2265. starget->id == fcport->rport->scsi_target_id) {
  2266. node_name = wwn_to_u64(fcport->node_name);
  2267. break;
  2268. }
  2269. }
  2270. fc_starget_node_name(starget) = node_name;
  2271. }
  2272. static void
  2273. qla2x00_get_starget_port_name(struct scsi_target *starget)
  2274. {
  2275. struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
  2276. scsi_qla_host_t *vha = shost_priv(host);
  2277. fc_port_t *fcport;
  2278. u64 port_name = 0;
  2279. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  2280. if (fcport->rport &&
  2281. starget->id == fcport->rport->scsi_target_id) {
  2282. port_name = wwn_to_u64(fcport->port_name);
  2283. break;
  2284. }
  2285. }
  2286. fc_starget_port_name(starget) = port_name;
  2287. }
  2288. static void
  2289. qla2x00_get_starget_port_id(struct scsi_target *starget)
  2290. {
  2291. struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
  2292. scsi_qla_host_t *vha = shost_priv(host);
  2293. fc_port_t *fcport;
  2294. uint32_t port_id = ~0U;
  2295. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  2296. if (fcport->rport &&
  2297. starget->id == fcport->rport->scsi_target_id) {
  2298. port_id = fcport->d_id.b.domain << 16 |
  2299. fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
  2300. break;
  2301. }
  2302. }
  2303. fc_starget_port_id(starget) = port_id;
  2304. }
  2305. static inline void
  2306. qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
  2307. {
  2308. fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
  2309. rport->dev_loss_tmo = timeout ? timeout : 1;
  2310. if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
  2311. nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
  2312. rport->dev_loss_tmo);
  2313. }
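/*
 * FC transport dev_loss_tmo callback: the rport is gone, so mark a still
 * missing fcport FCS_DEVICE_DEAD and drop the driver's rport references,
 * unless the port has reappeared or an ISP abort / PCI error recovery is
 * in progress.
 */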
  2314. static void
  2315. qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
  2316. {
  2317. struct Scsi_Host *host = rport_to_shost(rport);
  2318. fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
  2319. unsigned long flags;
  2320. if (!fcport)
  2321. return;
  2322. ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
  2323. DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
  2324. rport->port_state));
  2325. /*
  2326. * Now that the rport has been deleted, set the fcport state to
  2327. * FCS_DEVICE_DEAD, if the fcport is still lost.
  2328. */
  2329. if (fcport->scan_state != QLA_FCPORT_FOUND)
  2330. qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
  2331. /*
  2332. * Transport has effectively 'deleted' the rport, clear
  2333. * all local references.
  2334. */
  2335. spin_lock_irqsave(host->host_lock, flags);
  2336. /* Confirm port has not reappeared before clearing pointers. */
  2337. if (rport->port_state != FC_PORTSTATE_ONLINE) {
  2338. fcport->rport = NULL;
  2339. *((fc_port_t **)rport->dd_data) = NULL;
  2340. }
  2341. spin_unlock_irqrestore(host->host_lock, flags);
  2342. if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
  2343. return;
  2344. if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
  2345. /* Will wait for wind down of adapter */
  2346. ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,
  2347. "%s pci offline detected (id %06x)\n", __func__,
  2348. fcport->d_id.b24);
  2349. qla_pci_set_eeh_busy(fcport->vha);
  2350. qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
  2351. 0, WAIT_TARGET);
  2352. return;
  2353. }
  2354. }
  2355. static void
  2356. qla2x00_terminate_rport_io(struct fc_rport *rport)
  2357. {
  2358. fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
  2359. scsi_qla_host_t *vha;
  2360. if (!fcport)
  2361. return;
  2362. if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
  2363. return;
  2364. if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
  2365. return;
  2366. vha = fcport->vha;
  2367. if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
  2368. /* Will wait for wind down of adapter */
  2369. ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,
  2370. "%s pci offline detected (id %06x)\n", __func__,
  2371. fcport->d_id.b24);
  2372. qla_pci_set_eeh_busy(vha);
  2373. qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
  2374. 0, WAIT_TARGET);
  2375. return;
  2376. }
  2377. /*
2378. * At this point all of the fcport's software states are cleared.
2379. * Perform any final cleanup of firmware resources (PCBs and XCBs).
2380. *
2381. * Attempt to clean up only lost devices.
  2382. */
  2383. if (fcport->loop_id != FC_NO_LOOP_ID) {
  2384. if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
  2385. fcport->scan_state != QLA_FCPORT_FOUND) {
  2386. if (fcport->loop_id != FC_NO_LOOP_ID)
  2387. fcport->logout_on_delete = 1;
  2388. if (!EDIF_NEGOTIATION_PENDING(fcport)) {
  2389. ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
  2390. "%s %d schedule session deletion\n", __func__,
  2391. __LINE__);
  2392. qlt_schedule_sess_for_deletion(fcport);
  2393. }
  2394. } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
  2395. qla2x00_port_logout(fcport->vha, fcport);
  2396. }
  2397. }
2398. /* Check for any straggling I/O left behind. */
  2399. if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
  2400. ql_log(ql_log_warn, vha, 0x300b,
  2401. "IO not return. Resetting. \n");
  2402. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2403. qla2xxx_wake_dpc(vha);
  2404. qla2x00_wait_for_chip_reset(vha);
  2405. }
  2406. }
  2407. static int
  2408. qla2x00_issue_lip(struct Scsi_Host *shost)
  2409. {
  2410. scsi_qla_host_t *vha = shost_priv(shost);
  2411. if (IS_QLAFX00(vha->hw))
  2412. return 0;
  2413. if (vha->hw->flags.port_isolated)
  2414. return 0;
  2415. qla2x00_loop_reset(vha);
  2416. return 0;
  2417. }
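/*
 * Gather fc_host statistics: firmware link-error counters are read into a
 * DMA buffer while request and byte counts are summed across the base
 * queue pair, any additional queue pairs and the per-host counters.
 */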
  2418. static struct fc_host_statistics *
  2419. qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
  2420. {
  2421. scsi_qla_host_t *vha = shost_priv(shost);
  2422. struct qla_hw_data *ha = vha->hw;
  2423. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  2424. int rval;
  2425. struct link_statistics *stats;
  2426. dma_addr_t stats_dma;
  2427. struct fc_host_statistics *p = &vha->fc_host_stat;
  2428. struct qla_qpair *qpair;
  2429. int i;
  2430. u64 ib = 0, ob = 0, ir = 0, or = 0;
  2431. memset(p, -1, sizeof(*p));
  2432. if (IS_QLAFX00(vha->hw))
  2433. goto done;
  2434. if (test_bit(UNLOADING, &vha->dpc_flags))
  2435. goto done;
  2436. if (unlikely(pci_channel_offline(ha->pdev)))
  2437. goto done;
  2438. if (qla2x00_chip_is_down(vha))
  2439. goto done;
  2440. stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
  2441. GFP_KERNEL);
  2442. if (!stats) {
  2443. ql_log(ql_log_warn, vha, 0x707d,
  2444. "Failed to allocate memory for stats.\n");
  2445. goto done;
  2446. }
  2447. rval = QLA_FUNCTION_FAILED;
  2448. if (IS_FWI2_CAPABLE(ha)) {
  2449. rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
  2450. } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
  2451. !ha->dpc_active) {
  2452. /* Must be in a 'READY' state for statistics retrieval. */
  2453. rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
  2454. stats, stats_dma);
  2455. }
  2456. if (rval != QLA_SUCCESS)
  2457. goto done_free;
  2458. /* --- */
  2459. for (i = 0; i < vha->hw->max_qpairs; i++) {
  2460. qpair = vha->hw->queue_pair_map[i];
  2461. if (!qpair)
  2462. continue;
  2463. ir += qpair->counters.input_requests;
  2464. or += qpair->counters.output_requests;
  2465. ib += qpair->counters.input_bytes;
  2466. ob += qpair->counters.output_bytes;
  2467. }
  2468. ir += ha->base_qpair->counters.input_requests;
  2469. or += ha->base_qpair->counters.output_requests;
  2470. ib += ha->base_qpair->counters.input_bytes;
  2471. ob += ha->base_qpair->counters.output_bytes;
  2472. ir += vha->qla_stats.input_requests;
  2473. or += vha->qla_stats.output_requests;
  2474. ib += vha->qla_stats.input_bytes;
  2475. ob += vha->qla_stats.output_bytes;
  2476. /* --- */
  2477. p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
  2478. p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
  2479. p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
  2480. p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
  2481. p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
  2482. p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
  2483. if (IS_FWI2_CAPABLE(ha)) {
  2484. p->lip_count = le32_to_cpu(stats->lip_cnt);
  2485. p->tx_frames = le32_to_cpu(stats->tx_frames);
  2486. p->rx_frames = le32_to_cpu(stats->rx_frames);
  2487. p->dumped_frames = le32_to_cpu(stats->discarded_frames);
  2488. p->nos_count = le32_to_cpu(stats->nos_rcvd);
  2489. p->error_frames =
  2490. le32_to_cpu(stats->dropped_frames) +
  2491. le32_to_cpu(stats->discarded_frames);
  2492. if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
  2493. p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
  2494. p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
  2495. } else {
  2496. p->rx_words = ib >> 2;
  2497. p->tx_words = ob >> 2;
  2498. }
  2499. }
  2500. p->fcp_control_requests = vha->qla_stats.control_requests;
  2501. p->fcp_input_requests = ir;
  2502. p->fcp_output_requests = or;
  2503. p->fcp_input_megabytes = ib >> 20;
  2504. p->fcp_output_megabytes = ob >> 20;
  2505. p->seconds_since_last_reset =
  2506. get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
  2507. do_div(p->seconds_since_last_reset, HZ);
  2508. done_free:
  2509. dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
  2510. stats, stats_dma);
  2511. done:
  2512. return p;
  2513. }
  2514. static void
  2515. qla2x00_reset_host_stats(struct Scsi_Host *shost)
  2516. {
  2517. scsi_qla_host_t *vha = shost_priv(shost);
  2518. struct qla_hw_data *ha = vha->hw;
  2519. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  2520. struct link_statistics *stats;
  2521. dma_addr_t stats_dma;
  2522. int i;
  2523. struct qla_qpair *qpair;
  2524. memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
  2525. memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
  2526. for (i = 0; i < vha->hw->max_qpairs; i++) {
  2527. qpair = vha->hw->queue_pair_map[i];
  2528. if (!qpair)
  2529. continue;
  2530. memset(&qpair->counters, 0, sizeof(qpair->counters));
  2531. }
  2532. memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters));
  2533. vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
  2534. if (IS_FWI2_CAPABLE(ha)) {
  2535. int rval;
  2536. stats = dma_alloc_coherent(&ha->pdev->dev,
  2537. sizeof(*stats), &stats_dma, GFP_KERNEL);
  2538. if (!stats) {
  2539. ql_log(ql_log_warn, vha, 0x70d7,
  2540. "Failed to allocate memory for stats.\n");
  2541. return;
  2542. }
  2543. /* reset firmware statistics */
  2544. rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
  2545. if (rval != QLA_SUCCESS)
  2546. ql_log(ql_log_warn, vha, 0x70de,
  2547. "Resetting ISP statistics failed: rval = %d\n",
  2548. rval);
  2549. dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
  2550. stats, stats_dma);
  2551. }
  2552. }
  2553. static void
  2554. qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
  2555. {
  2556. scsi_qla_host_t *vha = shost_priv(shost);
  2557. qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
  2558. sizeof(fc_host_symbolic_name(shost)));
  2559. }
  2560. static void
  2561. qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
  2562. {
  2563. scsi_qla_host_t *vha = shost_priv(shost);
  2564. set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
  2565. }
  2566. static void
  2567. qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
  2568. {
  2569. scsi_qla_host_t *vha = shost_priv(shost);
  2570. static const uint8_t node_name[WWN_SIZE] = {
  2571. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
  2572. };
  2573. u64 fabric_name = wwn_to_u64(node_name);
  2574. if (vha->device_flags & SWITCH_FOUND)
  2575. fabric_name = wwn_to_u64(vha->fabric_node_name);
  2576. fc_host_fabric_name(shost) = fabric_name;
  2577. }
  2578. static void
  2579. qla2x00_get_host_port_state(struct Scsi_Host *shost)
  2580. {
  2581. scsi_qla_host_t *vha = shost_priv(shost);
  2582. struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
  2583. if (!base_vha->flags.online) {
  2584. fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
  2585. return;
  2586. }
  2587. switch (atomic_read(&base_vha->loop_state)) {
  2588. case LOOP_UPDATE:
  2589. fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
  2590. break;
  2591. case LOOP_DOWN:
  2592. if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
  2593. fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
  2594. else
  2595. fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
  2596. break;
  2597. case LOOP_DEAD:
  2598. fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
  2599. break;
  2600. case LOOP_READY:
  2601. fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
  2602. break;
  2603. default:
  2604. fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
  2605. break;
  2606. }
  2607. }
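/*
 * NPIV vport creation: validate the request, create and register the new
 * scsi_qla_host, inherit the physical port's FC transport attributes and,
 * when matching NPIV QoS data is present, give the vport its own queue pair.
 */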
  2608. static int
  2609. qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
  2610. {
  2611. int ret = 0;
  2612. uint8_t qos = 0;
  2613. scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
  2614. scsi_qla_host_t *vha = NULL;
  2615. struct qla_hw_data *ha = base_vha->hw;
  2616. int cnt;
  2617. struct req_que *req = ha->req_q_map[0];
  2618. struct qla_qpair *qpair;
  2619. ret = qla24xx_vport_create_req_sanity_check(fc_vport);
  2620. if (ret) {
  2621. ql_log(ql_log_warn, vha, 0x707e,
  2622. "Vport sanity check failed, status %x\n", ret);
  2623. return (ret);
  2624. }
  2625. vha = qla24xx_create_vhost(fc_vport);
  2626. if (vha == NULL) {
  2627. ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
  2628. return FC_VPORT_FAILED;
  2629. }
  2630. if (disable) {
  2631. atomic_set(&vha->vp_state, VP_OFFLINE);
  2632. fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
  2633. } else
  2634. atomic_set(&vha->vp_state, VP_FAILED);
  2635. /* ready to create vport */
  2636. ql_log(ql_log_info, vha, 0x7080,
  2637. "VP entry id %d assigned.\n", vha->vp_idx);
2638. /* initialize vport states */
  2639. atomic_set(&vha->loop_state, LOOP_DOWN);
  2640. vha->vp_err_state = VP_ERR_PORTDWN;
  2641. vha->vp_prev_err_state = VP_ERR_UNKWN;
  2642. /* Check if physical ha port is Up */
  2643. if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
  2644. atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
  2645. /* Don't retry or attempt login of this virtual port */
  2646. ql_dbg(ql_dbg_user, vha, 0x7081,
  2647. "Vport loop state is not UP.\n");
  2648. atomic_set(&vha->loop_state, LOOP_DEAD);
  2649. if (!disable)
  2650. fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
  2651. }
  2652. if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
  2653. if (ha->fw_attributes & BIT_4) {
  2654. int prot = 0, guard;
  2655. vha->flags.difdix_supported = 1;
  2656. ql_dbg(ql_dbg_user, vha, 0x7082,
  2657. "Registered for DIF/DIX type 1 and 3 protection.\n");
  2658. scsi_host_set_prot(vha->host,
  2659. prot | SHOST_DIF_TYPE1_PROTECTION
  2660. | SHOST_DIF_TYPE2_PROTECTION
  2661. | SHOST_DIF_TYPE3_PROTECTION
  2662. | SHOST_DIX_TYPE1_PROTECTION
  2663. | SHOST_DIX_TYPE2_PROTECTION
  2664. | SHOST_DIX_TYPE3_PROTECTION);
  2665. guard = SHOST_DIX_GUARD_CRC;
  2666. if (IS_PI_IPGUARD_CAPABLE(ha) &&
  2667. (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
  2668. guard |= SHOST_DIX_GUARD_IP;
  2669. scsi_host_set_guard(vha->host, guard);
  2670. } else
  2671. vha->flags.difdix_supported = 0;
  2672. }
  2673. if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
  2674. &ha->pdev->dev)) {
  2675. ql_dbg(ql_dbg_user, vha, 0x7083,
  2676. "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
  2677. goto vport_create_failed_2;
  2678. }
  2679. /* initialize attributes */
  2680. fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
  2681. fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
  2682. fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
  2683. fc_host_supported_classes(vha->host) =
  2684. fc_host_supported_classes(base_vha->host);
  2685. fc_host_supported_speeds(vha->host) =
  2686. fc_host_supported_speeds(base_vha->host);
  2687. qlt_vport_create(vha, ha);
  2688. qla24xx_vport_disable(fc_vport, disable);
  2689. if (!ql2xmqsupport || !ha->npiv_info)
  2690. goto vport_queue;
  2691. /* Create a request queue in QoS mode for the vport */
  2692. for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
  2693. if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
  2694. && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
  2695. 8) == 0) {
  2696. qos = ha->npiv_info[cnt].q_qos;
  2697. break;
  2698. }
  2699. }
  2700. if (qos) {
  2701. qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
  2702. if (!qpair)
  2703. ql_log(ql_log_warn, vha, 0x7084,
  2704. "Can't create qpair for VP[%d]\n",
  2705. vha->vp_idx);
  2706. else {
  2707. ql_dbg(ql_dbg_multiq, vha, 0xc001,
  2708. "Queue pair: %d Qos: %d) created for VP[%d]\n",
  2709. qpair->id, qos, vha->vp_idx);
  2710. ql_dbg(ql_dbg_user, vha, 0x7085,
  2711. "Queue Pair: %d Qos: %d) created for VP[%d]\n",
  2712. qpair->id, qos, vha->vp_idx);
  2713. req = qpair->req;
  2714. vha->qpair = qpair;
  2715. }
  2716. }
  2717. vport_queue:
  2718. vha->req = req;
  2719. return 0;
  2720. vport_create_failed_2:
  2721. qla24xx_disable_vp(vha);
  2722. qla24xx_deallocate_vp_id(vha);
  2723. scsi_host_put(vha->host);
  2724. return FC_VPORT_FAILED;
  2725. }
  2726. static int
  2727. qla24xx_vport_delete(struct fc_vport *fc_vport)
  2728. {
  2729. scsi_qla_host_t *vha = fc_vport->dd_data;
  2730. struct qla_hw_data *ha = vha->hw;
  2731. uint16_t id = vha->vp_idx;
  2732. set_bit(VPORT_DELETE, &vha->dpc_flags);
  2733. while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
  2734. msleep(1000);
  2735. qla24xx_disable_vp(vha);
  2736. qla2x00_wait_for_sess_deletion(vha);
  2737. qla_nvme_delete(vha);
  2738. qla_enode_stop(vha);
  2739. qla_edb_stop(vha);
  2740. vha->flags.delete_progress = 1;
  2741. qlt_remove_target(ha, vha);
  2742. fc_remove_host(vha->host);
  2743. scsi_remove_host(vha->host);
2744. /* Allow the timer to run to drain queued items when removing the vp */
  2745. qla24xx_deallocate_vp_id(vha);
  2746. if (vha->timer_active) {
  2747. qla2x00_vp_stop_timer(vha);
  2748. ql_dbg(ql_dbg_user, vha, 0x7086,
  2749. "Timer for the VP[%d] has stopped\n", vha->vp_idx);
  2750. }
  2751. qla2x00_free_fcports(vha);
  2752. mutex_lock(&ha->vport_lock);
  2753. ha->cur_vport_count--;
  2754. clear_bit(vha->vp_idx, ha->vp_idx_map);
  2755. mutex_unlock(&ha->vport_lock);
  2756. dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
  2757. vha->gnl.ldma);
  2758. vha->gnl.l = NULL;
  2759. vfree(vha->scan.l);
  2760. if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
  2761. if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
  2762. ql_log(ql_log_warn, vha, 0x7087,
  2763. "Queue Pair delete failed.\n");
  2764. }
  2765. ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
  2766. scsi_host_put(vha->host);
  2767. return 0;
  2768. }
  2769. static int
  2770. qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
  2771. {
  2772. scsi_qla_host_t *vha = fc_vport->dd_data;
  2773. if (disable)
  2774. qla24xx_disable_vp(vha);
  2775. else
  2776. qla24xx_enable_vp(vha);
  2777. return 0;
  2778. }
struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};
struct fc_function_template qla2xxx_transport_vport_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_speeds = 1,

	.get_host_port_id = qla2x00_get_host_port_id,
	.show_host_port_id = 1,
	.get_host_speed = qla2x00_get_host_speed,
	.show_host_speed = 1,
	.get_host_port_type = qla2x00_get_host_port_type,
	.show_host_port_type = 1,
	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
	.show_host_symbolic_name = 1,
	.set_host_system_hostname = qla2x00_set_host_system_hostname,
	.show_host_system_hostname = 1,
	.get_host_fabric_name = qla2x00_get_host_fabric_name,
	.show_host_fabric_name = 1,
	.get_host_port_state = qla2x00_get_host_port_state,
	.show_host_port_state = 1,

	.dd_fcrport_size = sizeof(struct fc_port *),
	.show_rport_supported_classes = 1,

	.get_starget_node_name = qla2x00_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = qla2x00_get_starget_port_name,
	.show_starget_port_name = 1,
	.get_starget_port_id = qla2x00_get_starget_port_id,
	.show_starget_port_id = 1,

	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.issue_fc_host_lip = qla2x00_issue_lip,
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.reset_fc_host_stats = qla2x00_reset_host_stats,

	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};
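/*
 * Added note (not in the original file): these two templates are normally
 * handed to the FC transport class at module init time, e.g. something
 * along the lines of
 *
 *	qla2xxx_transport_template =
 *	    fc_attach_transport(&qla2xxx_transport_functions);
 *	qla2xxx_transport_vport_template =
 *	    fc_attach_transport(&qla2xxx_transport_vport_functions);
 *
 * (that registration lives outside this file, in the driver's init path).
 * The vport template intentionally omits the vport_create/vport_disable/
 * vport_delete hooks, since an NPIV vport cannot itself create further
 * vports.
 */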
static uint
qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
{
	uint supported_speeds = FC_PORTSPEED_UNKNOWN;

	if (speeds & FDMI_PORT_SPEED_64GB)
		supported_speeds |= FC_PORTSPEED_64GBIT;
	if (speeds & FDMI_PORT_SPEED_32GB)
		supported_speeds |= FC_PORTSPEED_32GBIT;
	if (speeds & FDMI_PORT_SPEED_16GB)
		supported_speeds |= FC_PORTSPEED_16GBIT;
	if (speeds & FDMI_PORT_SPEED_8GB)
		supported_speeds |= FC_PORTSPEED_8GBIT;
	if (speeds & FDMI_PORT_SPEED_4GB)
		supported_speeds |= FC_PORTSPEED_4GBIT;
	if (speeds & FDMI_PORT_SPEED_2GB)
		supported_speeds |= FC_PORTSPEED_2GBIT;
	if (speeds & FDMI_PORT_SPEED_1GB)
		supported_speeds |= FC_PORTSPEED_1GBIT;

	return supported_speeds;
}
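/*
 * Added note: the helper above maps the FDMI_PORT_SPEED_* bits reported
 * for the adapter onto the FC transport class FC_PORTSPEED_* mask that is
 * exported through the fc_host sysfs attributes.  The mask starts at
 * FC_PORTSPEED_UNKNOWN, and any FDMI bits the transport class does not
 * model are simply ignored.
 */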
void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 speeds = 0, fdmi_speed = 0;

	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;

	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

	fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
	speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);

	fc_host_supported_speeds(vha->host) = speeds;
}
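/*
 * Added note: the fc_host_*() setters above populate the FC transport
 * class attributes for this host (visible under /sys/class/fc_host/hostN/).
 * The function is expected to run after the scsi_host has been registered,
 * typically from the driver's PCI probe path.
 */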