linit.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162
  1. /*
  2. * Adaptec AAC series RAID controller driver
  3. * (c) Copyright 2001 Red Hat Inc.
  4. *
  5. * based on the old aacraid driver that is..
  6. * Adaptec aacraid device driver for Linux.
  7. *
  8. * Copyright (c) 2000-2010 Adaptec, Inc.
  9. * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  10. * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2, or (at your option)
  15. * any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; see the file COPYING. If not, write to
  24. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25. *
  26. * Module Name:
  27. * linit.c
  28. *
  29. * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
  30. */
  31. #include <linux/compat.h>
  32. #include <linux/blkdev.h>
  33. #include <linux/completion.h>
  34. #include <linux/init.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/kernel.h>
  37. #include <linux/module.h>
  38. #include <linux/moduleparam.h>
  39. #include <linux/pci.h>
  40. #include <linux/aer.h>
  41. #include <linux/pci-aspm.h>
  42. #include <linux/slab.h>
  43. #include <linux/mutex.h>
  44. #include <linux/spinlock.h>
  45. #include <linux/syscalls.h>
  46. #include <linux/delay.h>
  47. #include <linux/kthread.h>
  48. #include <scsi/scsi.h>
  49. #include <scsi/scsi_cmnd.h>
  50. #include <scsi/scsi_device.h>
  51. #include <scsi/scsi_host.h>
  52. #include <scsi/scsi_tcq.h>
  53. #include <scsi/scsicam.h>
  54. #include <scsi/scsi_eh.h>
  55. #include "aacraid.h"
  56. #define AAC_DRIVER_VERSION "1.2.1"
  57. #ifndef AAC_DRIVER_BRANCH
  58. #define AAC_DRIVER_BRANCH ""
  59. #endif
  60. #define AAC_DRIVERNAME "aacraid"
  61. #ifdef AAC_DRIVER_BUILD
  62. #define _str(x) #x
  63. #define str(x) _str(x)
  64. #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
  65. #else
  66. #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
  67. #endif
  68. MODULE_AUTHOR("Red Hat Inc and Adaptec");
  69. MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
  70. "Adaptec Advanced Raid Products, "
  71. "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
  72. MODULE_LICENSE("GPL");
  73. MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
  74. static DEFINE_MUTEX(aac_mutex);
  75. static LIST_HEAD(aac_devices);
  76. static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
  77. char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
  78. /*
  79. * Because of the way Linux names scsi devices, the order in this table has
  80. * become important. Check for on-board Raid first, add-in cards second.
  81. *
  82. * Note: The last field is used to index into aac_drivers below.
  83. */
static const struct pci_device_id aac_pci_tbl[] = {
	/*
	 * Fields: { vendor, device, subvendor, subdevice, class,
	 *           class_mask, driver_data }
	 * driver_data is the index into aac_drivers[] below.
	 */
	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
	{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
	{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
	{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
	{ 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
	{ 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
	{ 0,}	/* terminator, required by MODULE_DEVICE_TABLE */
};
  154. MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
  155. /*
  156. * dmb - For now we add the number of channels to this structure.
  157. * In the future we should add a fib that reports the number of channels
  158. * for the card. At that time we can remove the channels from here
  159. */
static struct aac_driver_ident aac_drivers[] = {
	/*
	 * Fields: { init routine, driver name, vendor string, model string,
	 *           channel count, quirks }
	 * Row order must match the driver_data index stored in aac_pci_tbl
	 * above -- do not reorder entries independently.
	 */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */
	{ aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
	{ aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
	{ NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */
	{ aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
	{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
	{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */
	{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
	{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
	{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
	{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
};
  227. /**
  228. * aac_queuecommand - queue a SCSI command
  229. * @cmd: SCSI command to queue
  230. * @done: Function to call on command completion
  231. *
  232. * Queues a command for execution by the associated Host Adapter.
  233. *
  234. * TODO: unify with aac_scsi_cmd().
  235. */
  236. static int aac_queuecommand(struct Scsi_Host *shost,
  237. struct scsi_cmnd *cmd)
  238. {
  239. int r = 0;
  240. cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
  241. r = (aac_scsi_cmd(cmd) ? FAILED : 0);
  242. return r;
  243. }
  244. /**
  245. * aac_info - Returns the host adapter name
  246. * @shost: Scsi host to report on
  247. *
  248. * Returns a static string describing the device in question
  249. */
  250. static const char *aac_info(struct Scsi_Host *shost)
  251. {
  252. struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
  253. return aac_drivers[dev->cardtype].name;
  254. }
  255. /**
  256. * aac_get_driver_ident
  257. * @devtype: index into lookup table
  258. *
  259. * Returns a pointer to the entry in the driver lookup table.
  260. */
  261. struct aac_driver_ident* aac_get_driver_ident(int devtype)
  262. {
  263. return &aac_drivers[devtype];
  264. }
  265. /**
  266. * aac_biosparm - return BIOS parameters for disk
  267. * @sdev: The scsi device corresponding to the disk
  268. * @bdev: the block device corresponding to the disk
  269. * @capacity: the sector capacity of the disk
  270. * @geom: geometry block to fill in
  271. *
  272. * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
  273. * The default disk geometry is 64 heads, 32 sectors, and the appropriate
  274. * number of cylinders so as not to exceed drive capacity. In order for
  275. * disks equal to or larger than 1 GB to be addressable by the BIOS
  276. * without exceeding the BIOS limitation of 1024 cylinders, Extended
  277. * Translation should be enabled. With Extended Translation enabled,
  278. * drives between 1 GB inclusive and 2 GB exclusive are given a disk
  279. * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
  280. * are given a disk geometry of 255 heads and 63 sectors. However, if
  281. * the BIOS detects that the Extended Translation setting does not match
  282. * the geometry in the partition table, then the translation inferred
  283. * from the partition table will be used by the BIOS, and a warning may
  284. * be displayed.
  285. */
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int *geom)
{
	/* View the caller's geometry array as heads/sectors/cylinders */
	struct diskparm *param = (struct diskparm *)geom;
	unsigned char *buf;

	dprintk((KERN_DEBUG "aac_biosparm.\n"));

	/*
	 * Assuming extended translation is enabled - #REVISIT#
	 * Pick a default translation purely from the capacity:
	 * >= 2 GB -> 255/63, >= 1 GB -> 128/32, smaller -> 64/32.
	 */
	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
		if (capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
			param->heads = 255;
			param->sectors = 63;
		} else {
			param->heads = 128;
			param->sectors = 32;
		}
	} else {
		param->heads = 64;
		param->sectors = 32;
	}

	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);

	/*
	 * Read the first 1024 bytes from the disk device, if the boot
	 * sector partition table is valid, search for a partition table
	 * entry whose end_head matches one of the standard geometry
	 * translations ( 64/32, 128/32, 255/63 ).
	 */
	buf = scsi_bios_ptable(bdev);
	if (!buf)
		return 0;	/* no table readable: keep the default geometry */
	/* NOTE(review): 0xaa55 is checked at offset 0x40 of the buffer
	 * returned by scsi_bios_ptable() -- presumably that buffer starts
	 * at the partition-table area of the boot sector; confirm against
	 * the scsicam helpers. */
	if (*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
		struct partition *first = (struct partition * )buf;
		struct partition *entry = first;
		int saved_cylinders = param->cylinders;
		int num;
		unsigned char end_head, end_sec;

		/* Scan the four primary entries for a known translation */
		for (num = 0; num < 4; num++) {
			end_head = entry->end_head;
			end_sec = entry->end_sector & 0x3f;

			if (end_head == 63) {
				param->heads = 64;
				param->sectors = 32;
				break;
			} else if (end_head == 127) {
				param->heads = 128;
				param->sectors = 32;
				break;
			} else if (end_head == 254) {
				param->heads = 255;
				param->sectors = 63;
				break;
			}
			entry++;
		}

		if (num == 4) {
			/* No entry matched: report the first entry's end CHS
			 * in the diagnostics below. */
			end_head = first->end_head;
			end_sec = first->end_sector & 0x3f;
		}

		/* Recompute cylinders for whichever geometry was chosen */
		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);

		if (num < 4 && end_sec == param->sectors) {
			if (param->cylinders != saved_cylinders)
				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
					param->heads, param->sectors, num));
		} else if (end_head > 0 || end_sec > 0) {
			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
				end_head + 1, end_sec, num));
			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
				param->heads, param->sectors));
		}
	}
	kfree(buf);
	return 0;
}
  360. /**
  361. * aac_slave_configure - compute queue depths
  362. * @sdev: SCSI device we are considering
  363. *
  364. * Selects queue depths for each target device based on the host adapter's
  365. * total capacity and the queue depth supported by the target device.
  366. * A queue depth of one automatically disables tagged queueing.
  367. */
static int aac_slave_configure(struct scsi_device *sdev)
{
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
	int chn, tid;
	unsigned int depth = 0;
	unsigned int set_timeout = 0;
	bool set_qd_dev_type = false;
	u8 devtype = 0;

	chn = aac_logical_to_phys(sdev_channel(sdev));
	tid = sdev_id(sdev);
	/* SmartIOC (sa_firmware) path: the hba_map already knows the device
	 * type, so the queue depth can be taken from it directly. */
	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
		devtype = aac->hba_map[chn][tid].devtype;

		if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
			/* Firmware-reported limit for native raw devices */
			depth = aac->hba_map[chn][tid].qd_limit;
			set_timeout = 1;
			goto common_config;
		}
		if (devtype == AAC_DEVTYPE_ARC_RAW) {
			/* Depth chosen later from the vendor string */
			set_qd_dev_type = true;
			set_timeout = 1;
			goto common_config;
		}
	}

	if (aac->jbod && (sdev->type == TYPE_DISK))
		sdev->removable = 1;

	/* Physical (non-container) disks: honor the expose_physicals
	 * module policy -- hide them entirely, or keep them from the
	 * upper-level drivers. */
	if (sdev->type == TYPE_DISK
	 && sdev_channel(sdev) != CONTAINER_CHANNEL
	 && (!aac->jbod || sdev->inq_periph_qual)
	 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {

		if (expose_physicals == 0)
			return -ENXIO;

		if (expose_physicals < 0)
			sdev->no_uld_attach = 1;
	}

	/* Tagged-queueing disks: split the host's can_queue budget among
	 * all logical storage units (num_lsu), after reserving one slot
	 * for each single-queue device (num_one). */
	if (sdev->tagged_supported
	 && sdev->type == TYPE_DISK
	 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
	 && !sdev->no_uld_attach) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num_lsu = 0;
		unsigned num_one = 0;
		unsigned cid;

		set_timeout = 1;

		/* Count valid containers as logical storage units */
		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
			if (aac->fsa_dev[cid].valid)
				++num_lsu;

		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported
			 && dev->type == TYPE_DISK
			 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
			 && !dev->no_uld_attach) {
				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
				 || !aac->fsa_dev[sdev_id(dev)].valid) {
					++num_lsu;
				}
			} else {
				++num_one;
			}
		}

		if (num_lsu == 0)	/* avoid division by zero below */
			++num_lsu;

		depth = (host->can_queue - num_one) / num_lsu;

		if (sdev_channel(sdev) != NATIVE_CHANNEL)
			goto common_config;

		set_qd_dev_type = true;
	}

common_config:

	/*
	 * Check if SATA drive
	 */
	if (set_qd_dev_type) {
		if (strncmp(sdev->vendor, "ATA", 3) == 0)
			depth = 32;
		else
			depth = 64;
	}

	/*
	 * Firmware has an individual device recovery time typically
	 * of 35 seconds, give us a margin.
	 */
	if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
		blk_queue_rq_timeout(sdev->request_queue, 45*HZ);

	/* Clamp depth to [1, 256] before applying it */
	if (depth > 256)
		depth = 256;
	else if (depth < 1)
		depth = 1;

	scsi_change_queue_depth(sdev, depth);

	sdev->tagged_supported = 1;

	return 0;
}
/**
 *	aac_change_queue_depth		-	alter queue depths
 *	@sdev:	SCSI device we are considering
 *	@depth:	desired queue depth
 *
 *	Alters queue depths for target device based on the host adapter's
 *	total capacity and the queue depth supported by the target device.
 *	Container-channel disks share the host's can_queue budget; native
 *	RAW (HBA passthrough) devices use the firmware-supplied qd_limit;
 *	everything else is pinned to a depth of 1.
 */
static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
{
	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
	int chn, tid, is_native_device = 0;

	chn = aac_logical_to_phys(sdev_channel(sdev));
	tid = sdev_id(sdev);
	/* Native-RAW devices are identified through the bus/target map. */
	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
	    aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
		is_native_device = 1;

	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num = 0;

		/*
		 * Count devices competing for the host queue.
		 * NOTE(review): the unconditional ++num counts every device
		 * once and container-channel disks twice -- looks like an
		 * accidental double count; confirm against vendor intent.
		 */
		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
			    (sdev_channel(dev) == CONTAINER_CHANNEL))
				++num;
			++num;
		}
		if (num >= host->can_queue)
			num = host->can_queue - 1;
		/* Leave headroom for the other devices, clamp to [2, 256]. */
		if (depth > (host->can_queue - num))
			depth = host->can_queue - num;
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		return scsi_change_queue_depth(sdev, depth);
	} else if (is_native_device) {
		scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
	} else {
		scsi_change_queue_depth(sdev, 1);
	}
	return sdev->queue_depth;
}
  503. static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
  504. {
  505. struct scsi_device *sdev = to_scsi_device(dev);
  506. struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
  507. if (sdev_channel(sdev) != CONTAINER_CHANNEL)
  508. return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
  509. ? "Hidden\n" :
  510. ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
  511. return snprintf(buf, PAGE_SIZE, "%s\n",
  512. get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
  513. }
/* Per-device sysfs attribute "level" (read-only), shown by aac_show_raid_level(). */
static struct device_attribute aac_raid_level_attr = {
	.attr = {
		.name = "level",
		.mode = S_IRUGO,
	},
	.show = aac_show_raid_level
};
  521. static ssize_t aac_show_unique_id(struct device *dev,
  522. struct device_attribute *attr, char *buf)
  523. {
  524. struct scsi_device *sdev = to_scsi_device(dev);
  525. struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
  526. unsigned char sn[16];
  527. memset(sn, 0, sizeof(sn));
  528. if (sdev_channel(sdev) == CONTAINER_CHANNEL)
  529. memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
  530. return snprintf(buf, 16 * 2 + 2,
  531. "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
  532. sn[0], sn[1], sn[2], sn[3],
  533. sn[4], sn[5], sn[6], sn[7],
  534. sn[8], sn[9], sn[10], sn[11],
  535. sn[12], sn[13], sn[14], sn[15]);
  536. }
/* Per-device sysfs attribute "unique_id" (read-only). */
static struct device_attribute aac_unique_id_attr = {
	.attr = {
		.name = "unique_id",
		.mode = 0444,
	},
	.show = aac_show_unique_id
};

/* NULL-terminated list of per-device attributes, hooked into the host template. */
static struct device_attribute *aac_dev_attrs[] = {
	&aac_raid_level_attr,
	&aac_unique_id_attr,
	NULL,
};
  549. static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
  550. {
  551. struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
  552. if (!capable(CAP_SYS_RAWIO))
  553. return -EPERM;
  554. return aac_do_ioctl(dev, cmd, arg);
  555. }
/*
 * Count commands still outstanding on the host, bucketed by which layer
 * currently owns them (SCp.phase), and log the per-owner totals.
 * Returns the number of commands not yet returned to the kernel
 * (midlevel + lowlevel + error handler + firmware).
 */
static int get_num_of_incomplete_fibs(struct aac_dev *aac)
{
	unsigned long flags;
	struct scsi_device *sdev = NULL;
	struct Scsi_Host *shost = aac->scsi_host_ptr;
	struct scsi_cmnd *scmnd = NULL;
	struct device *ctrl_dev;

	int mlcnt = 0;
	int llcnt = 0;
	int ehcnt = 0;
	int fwcnt = 0;
	int krlcnt = 0;

	__shost_for_each_device(sdev, shost) {
		/* cmd_list is walked under the device's list_lock. */
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_for_each_entry(scmnd, &sdev->cmd_list, list) {
			switch (scmnd->SCp.phase) {
			case AAC_OWNER_FIRMWARE:
				fwcnt++;
				break;
			case AAC_OWNER_ERROR_HANDLER:
				ehcnt++;
				break;
			case AAC_OWNER_LOWLEVEL:
				llcnt++;
				break;
			case AAC_OWNER_MIDLEVEL:
				mlcnt++;
				break;
			default:
				krlcnt++;
				break;
			}
		}
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}

	ctrl_dev = &aac->pdev->dev;

	dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
	dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
	dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
	dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
	dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);

	/* krlcnt (back in kernel hands) deliberately excluded. */
	return mlcnt + llcnt + ehcnt + fwcnt;
}
/*
 * SCSI error-handler abort callback.
 *
 * For native-RAW (HBA passthrough) devices a HBA_TMF_ABORT_TASK task
 * management request is sent to the firmware and we poll up to 15s for
 * its completion.  For ARC (container) devices the associated FIB(s)
 * are only flagged FIB_CONTEXT_FLAG_TIMED_OUT so the normal completion
 * path skips them and the error handler reclaims the command.
 * Returns SUCCESS or FAILED per SCSI EH convention.
 */
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	int count, found;
	u32 bus, cid;
	int ret = FAILED;

	/* Don't bother aborting on a dead/resetting controller. */
	if (aac_adapter_check_health(aac))
		return ret;

	bus = aac_logical_to_phys(scmd_channel(cmd));
	cid = scmd_id(cmd);
	if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
		struct fib *fib;
		struct aac_hba_tm_req *tmf;
		int status;
		u64 address;

		pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
		 AAC_DRIVERNAME,
		 host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);

		/* Verify the command is still owned by an in-flight native fib. */
		found = 0;
		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
			fib = &aac->fibs[count];
			if (*(u8 *)fib->hw_fib_va != 0 &&
				(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
				(fib->callback_data == cmd)) {
				found = 1;
				break;
			}
		}
		if (!found)
			return ret;

		/* start a HBA_TMF_ABORT_TASK TMF request */
		fib = aac_fib_alloc(aac);
		if (!fib)
			return ret;

		tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
		memset(tmf, 0, sizeof(*tmf));
		tmf->tmf = HBA_TMF_ABORT_TASK;
		tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
		tmf->lun[1] = cmd->device->lun;

		/* Firmware error buffer lives at fib->hw_error_pa. */
		address = (u64)fib->hw_error_pa;
		tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		fib->hbacmd_size = sizeof(*tmf);

		/* sent_command is set by aac_hba_callback on completion. */
		cmd->SCp.sent_command = 0;
		status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
				  (fib_callback) aac_hba_callback,
				  (void *) cmd);
		if (status != -EINPROGRESS) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return ret;
		}
		/* Wait up to 15 secs for completion */
		for (count = 0; count < 15; ++count) {
			if (cmd->SCp.sent_command) {
				ret = SUCCESS;
				break;
			}
			msleep(1000);
		}

		if (ret != SUCCESS)
			pr_err("%s: Host adapter abort request timed out\n",
			AAC_DRIVERNAME);
	} else {
		pr_err(
			"%s: Host adapter abort request.\n"
			"%s: Outstanding commands on (%d,%d,%d,%d):\n",
			AAC_DRIVERNAME, AAC_DRIVERNAME,
			host->host_no, sdev_channel(dev), sdev_id(dev),
			(int)dev->lun);
		switch (cmd->cmnd[0]) {
		case SERVICE_ACTION_IN_16:
			if (!(aac->raw_io_interface) ||
			    !(aac->raw_io_64) ||
			    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
				break;
			/* fall through: READ_CAPACITY_16 handled like the below */
		case INQUIRY:
		case READ_CAPACITY:
			/*
			 * Mark associated FIB to not complete,
			 * eh handler does this
			 */
			for (count = 0;
				count < (host->can_queue + AAC_NUM_MGT_FIB);
				++count) {
				struct fib *fib = &aac->fibs[count];
				if (fib->hw_fib_va->header.XferState &&
					(fib->flags & FIB_CONTEXT_FLAG) &&
					(fib->callback_data == cmd)) {
					fib->flags |=
						FIB_CONTEXT_FLAG_TIMED_OUT;
					cmd->SCp.phase =
						AAC_OWNER_ERROR_HANDLER;
					ret = SUCCESS;
				}
			}
			break;
		case TEST_UNIT_READY:
			/*
			 * Mark associated FIB to not complete,
			 * eh handler does this.  TURs match on the whole
			 * device, not just this command.
			 */
			for (count = 0;
				count < (host->can_queue + AAC_NUM_MGT_FIB);
				++count) {
				struct scsi_cmnd *command;
				struct fib *fib = &aac->fibs[count];
				command = fib->callback_data;
				if ((fib->hw_fib_va->header.XferState &
					cpu_to_le32
					(Async | NoResponseExpected)) &&
					(fib->flags & FIB_CONTEXT_FLAG) &&
					((command)) &&
					(command->device == cmd->device)) {
					fib->flags |=
						FIB_CONTEXT_FLAG_TIMED_OUT;
					command->SCp.phase =
						AAC_OWNER_ERROR_HANDLER;
					if (command == cmd)
						ret = SUCCESS;
				}
			}
			break;
		}
	}
	return ret;
}
  729. static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info,
  730. struct fib *fib, u64 tmf_lun)
  731. {
  732. struct aac_hba_tm_req *tmf;
  733. u64 address;
  734. /* start a HBA_TMF_LUN_RESET TMF request */
  735. tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
  736. memset(tmf, 0, sizeof(*tmf));
  737. tmf->tmf = HBA_TMF_LUN_RESET;
  738. tmf->it_nexus = info->rmw_nexus;
  739. int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun);
  740. address = (u64)fib->hw_error_pa;
  741. tmf->error_ptr_hi = cpu_to_le32
  742. ((u32)(address >> 32));
  743. tmf->error_ptr_lo = cpu_to_le32
  744. ((u32)(address & 0xffffffff));
  745. tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  746. fib->hbacmd_size = sizeof(*tmf);
  747. return HBA_IU_TYPE_SCSI_TM_REQ;
  748. }
  749. static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
  750. struct fib *fib)
  751. {
  752. struct aac_hba_reset_req *rst;
  753. u64 address;
  754. /* already tried, start a hard reset now */
  755. rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
  756. memset(rst, 0, sizeof(*rst));
  757. rst->it_nexus = info->rmw_nexus;
  758. address = (u64)fib->hw_error_pa;
  759. rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
  760. rst->error_ptr_lo = cpu_to_le32
  761. ((u32)(address & 0xffffffff));
  762. rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  763. fib->hbacmd_size = sizeof(*rst);
  764. return HBA_IU_TYPE_SATA_REQ;
  765. }
  766. void aac_tmf_callback(void *context, struct fib *fibptr)
  767. {
  768. struct aac_hba_resp *err =
  769. &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
  770. struct aac_hba_map_info *info = context;
  771. int res;
  772. switch (err->service_response) {
  773. case HBA_RESP_SVCRES_TMF_REJECTED:
  774. res = -1;
  775. break;
  776. case HBA_RESP_SVCRES_TMF_LUN_INVALID:
  777. res = 0;
  778. break;
  779. case HBA_RESP_SVCRES_TMF_COMPLETE:
  780. case HBA_RESP_SVCRES_TMF_SUCCEEDED:
  781. res = 0;
  782. break;
  783. default:
  784. res = -2;
  785. break;
  786. }
  787. aac_fib_complete(fibptr);
  788. info->reset_state = res;
  789. }
  790. /*
  791. * aac_eh_dev_reset - Device reset command handling
  792. * @scsi_cmd: SCSI command block causing the reset
  793. *
  794. */
  795. static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
  796. {
  797. struct scsi_device * dev = cmd->device;
  798. struct Scsi_Host * host = dev->host;
  799. struct aac_dev * aac = (struct aac_dev *)host->hostdata;
  800. struct aac_hba_map_info *info;
  801. int count;
  802. u32 bus, cid;
  803. struct fib *fib;
  804. int ret = FAILED;
  805. int status;
  806. u8 command;
  807. bus = aac_logical_to_phys(scmd_channel(cmd));
  808. cid = scmd_id(cmd);
  809. if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
  810. return FAILED;
  811. info = &aac->hba_map[bus][cid];
  812. if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
  813. !(info->reset_state > 0)))
  814. return FAILED;
  815. pr_err("%s: Host device reset request. SCSI hang ?\n",
  816. AAC_DRIVERNAME);
  817. fib = aac_fib_alloc(aac);
  818. if (!fib)
  819. return ret;
  820. /* start a HBA_TMF_LUN_RESET TMF request */
  821. command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun);
  822. info->reset_state = 1;
  823. status = aac_hba_send(command, fib,
  824. (fib_callback) aac_tmf_callback,
  825. (void *) info);
  826. if (status != -EINPROGRESS) {
  827. info->reset_state = 0;
  828. aac_fib_complete(fib);
  829. aac_fib_free(fib);
  830. return ret;
  831. }
  832. /* Wait up to 15 seconds for completion */
  833. for (count = 0; count < 15; ++count) {
  834. if (info->reset_state == 0) {
  835. ret = info->reset_state == 0 ? SUCCESS : FAILED;
  836. break;
  837. }
  838. msleep(1000);
  839. }
  840. return ret;
  841. }
/*
 *	aac_eh_target_reset - Target reset command handling
 *	@scsi_cmd:	SCSI command block causing the reset
 *
 *	Escalation after a LUN reset: sends a hard reset request for the
 *	native-RAW device's nexus and waits up to 15 seconds for
 *	aac_tmf_callback() to post the result in info->reset_state.
 */
static int aac_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	struct aac_hba_map_info *info;
	int count;
	u32 bus, cid;
	int ret = FAILED;
	struct fib *fib;
	int status;
	u8 command;

	bus = aac_logical_to_phys(scmd_channel(cmd));
	cid = scmd_id(cmd);
	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
		return FAILED;

	info = &aac->hba_map[bus][cid];

	/* Only native-RAW devices take resets, and only one at a time. */
	if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
	      !(info->reset_state > 0)))
		return FAILED;

	pr_err("%s: Host target reset request. SCSI hang ?\n",
	       AAC_DRIVERNAME);

	fib = aac_fib_alloc(aac);
	if (!fib)
		return ret;

	/* already tried, start a hard reset now */
	command = aac_eh_tmf_hard_reset_fib(info, fib);

	info->reset_state = 2;

	status = aac_hba_send(command, fib,
			      (fib_callback) aac_tmf_callback,
			      (void *) info);
	if (status != -EINPROGRESS) {
		info->reset_state = 0;
		aac_fib_complete(fib);
		aac_fib_free(fib);
		return ret;
	}

	/* Wait up to 15 seconds for completion; the callback posts
	 * 0 on success, negative on failure. */
	for (count = 0; count < 15; ++count) {
		if (info->reset_state <= 0) {
			ret = info->reset_state == 0 ? SUCCESS : FAILED;
			break;
		}
		msleep(1000);
	}

	return ret;
}
  894. /*
  895. * aac_eh_bus_reset - Bus reset command handling
  896. * @scsi_cmd: SCSI command block causing the reset
  897. *
  898. */
  899. static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
  900. {
  901. struct scsi_device * dev = cmd->device;
  902. struct Scsi_Host * host = dev->host;
  903. struct aac_dev * aac = (struct aac_dev *)host->hostdata;
  904. int count;
  905. u32 cmd_bus;
  906. int status = 0;
  907. cmd_bus = aac_logical_to_phys(scmd_channel(cmd));
  908. /* Mark the assoc. FIB to not complete, eh handler does this */
  909. for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
  910. struct fib *fib = &aac->fibs[count];
  911. if (fib->hw_fib_va->header.XferState &&
  912. (fib->flags & FIB_CONTEXT_FLAG) &&
  913. (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) {
  914. struct aac_hba_map_info *info;
  915. u32 bus, cid;
  916. cmd = (struct scsi_cmnd *)fib->callback_data;
  917. bus = aac_logical_to_phys(scmd_channel(cmd));
  918. if (bus != cmd_bus)
  919. continue;
  920. cid = scmd_id(cmd);
  921. info = &aac->hba_map[bus][cid];
  922. if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
  923. info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
  924. fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
  925. cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
  926. }
  927. }
  928. }
  929. pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
  930. /*
  931. * Check the health of the controller
  932. */
  933. status = aac_adapter_check_health(aac);
  934. if (status)
  935. dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
  936. count = get_num_of_incomplete_fibs(aac);
  937. return (count == 0) ? SUCCESS : FAILED;
  938. }
/*
 *	aac_eh_host_reset - Host reset command handling
 *	@scsi_cmd:	SCSI command block causing the reset
 *
 *	Last-resort escalation: performs a register-level (blind) adapter
 *	reset if the firmware advertises MU or doorbell reset support and
 *	the module's aac_check_reset policy allows it.  On success, clears
 *	the per-device reset_state bookkeeping for native-RAW devices.
 */
int aac_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	int ret = FAILED;
	__le32 supported_options2 = 0;
	bool is_mu_reset;
	bool is_ignore_reset;
	bool is_doorbell_reset;

	/*
	 * Check if reset is supported by the firmware
	 */
	supported_options2 = aac->supplement_adapter_info.supported_options2;
	is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
	is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
	is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
	/*
	 * This adapter needs a blind reset, only do so for
	 * Adapters that support a register, instead of a commanded,
	 * reset.
	 */
	if ((is_mu_reset || is_doorbell_reset)
	 && aac_check_reset
	 && (aac_check_reset != -1 || !is_ignore_reset)) {
		/* Bypass wait for command quiesce */
		if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0)
			ret = SUCCESS;
	}
	/*
	 * Reset EH state
	 */
	if (ret == SUCCESS) {
		int bus, cid;
		struct aac_hba_map_info *info;

		/* Any in-flight TMF state is void after a host reset. */
		for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
			for (cid = 0; cid < AAC_MAX_TARGETS; cid++) {
				info = &aac->hba_map[bus][cid];
				if (info->devtype == AAC_DEVTYPE_NATIVE_RAW)
					info->reset_state = 0;
			}
		}
	}
	return ret;
}
  989. /**
  990. * aac_cfg_open - open a configuration file
  991. * @inode: inode being opened
  992. * @file: file handle attached
  993. *
  994. * Called when the configuration device is opened. Does the needed
  995. * set up on the handle and then returns
  996. *
  997. * Bugs: This needs extending to check a given adapter is present
  998. * so we can support hot plugging, and to ref count adapters.
  999. */
  1000. static int aac_cfg_open(struct inode *inode, struct file *file)
  1001. {
  1002. struct aac_dev *aac;
  1003. unsigned minor_number = iminor(inode);
  1004. int err = -ENODEV;
  1005. mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */
  1006. list_for_each_entry(aac, &aac_devices, entry) {
  1007. if (aac->id == minor_number) {
  1008. file->private_data = aac;
  1009. err = 0;
  1010. break;
  1011. }
  1012. }
  1013. mutex_unlock(&aac_mutex);
  1014. return err;
  1015. }
  1016. /**
  1017. * aac_cfg_ioctl - AAC configuration request
  1018. * @inode: inode of device
  1019. * @file: file handle
  1020. * @cmd: ioctl command code
  1021. * @arg: argument
  1022. *
  1023. * Handles a configuration ioctl. Currently this involves wrapping it
  1024. * up and feeding it into the nasty windowsalike glue layer.
  1025. *
  1026. * Bugs: Needs locking against parallel ioctls lower down
  1027. * Bugs: Needs to handle hot plugging
  1028. */
  1029. static long aac_cfg_ioctl(struct file *file,
  1030. unsigned int cmd, unsigned long arg)
  1031. {
  1032. struct aac_dev *aac = (struct aac_dev *)file->private_data;
  1033. if (!capable(CAP_SYS_RAWIO))
  1034. return -EPERM;
  1035. return aac_do_ioctl(aac, cmd, (void __user *)arg);
  1036. }
  1037. #ifdef CONFIG_COMPAT
/*
 * Dispatch a 32-bit-compat ioctl.  Most commands have identical 32/64-bit
 * layouts and pass straight through; FSACTL_GET_NEXT_ADAPTER_FIB needs its
 * struct fib_ioctl rebuilt in a 64-bit-sized user buffer.
 */
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
	long ret;
	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
	case FSACTL_SENDFIB:
	case FSACTL_OPEN_GET_ADAPTER_FIB:
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
	case FSACTL_SEND_RAW_SRB:
	case FSACTL_GET_PCI_INFO:
	case FSACTL_QUERY_DISK:
	case FSACTL_DELETE_DISK:
	case FSACTL_FORCE_DELETE_DISK:
	case FSACTL_GET_CONTAINERS:
	case FSACTL_SEND_LARGE_FIB:
		/* Layout-compatible between 32- and 64-bit callers. */
		ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
		break;

	case FSACTL_GET_NEXT_ADAPTER_FIB: {
		struct fib_ioctl __user *f;

		/* Stage a zeroed native-layout copy on the compat user stack. */
		f = compat_alloc_user_space(sizeof(*f));
		ret = 0;
		if (clear_user(f, sizeof(*f)))
			ret = -EFAULT;
		/*
		 * Copy all but the trailing u32 -- presumably the padding /
		 * pointer-sized tail that differs between ABIs (TODO confirm
		 * against struct fib_ioctl's compat layout).
		 */
		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
			ret = -EFAULT;
		if (!ret)
			ret = aac_do_ioctl(dev, cmd, f);
		break;
	}

	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}
  1073. static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
  1074. {
  1075. struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
  1076. if (!capable(CAP_SYS_RAWIO))
  1077. return -EPERM;
  1078. return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
  1079. }
  1080. static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
  1081. {
  1082. if (!capable(CAP_SYS_RAWIO))
  1083. return -EPERM;
  1084. return aac_compat_do_ioctl(file->private_data, cmd, arg);
  1085. }
  1086. #endif
  1087. static ssize_t aac_show_model(struct device *device,
  1088. struct device_attribute *attr, char *buf)
  1089. {
  1090. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1091. int len;
  1092. if (dev->supplement_adapter_info.adapter_type_text[0]) {
  1093. char *cp = dev->supplement_adapter_info.adapter_type_text;
  1094. while (*cp && *cp != ' ')
  1095. ++cp;
  1096. while (*cp == ' ')
  1097. ++cp;
  1098. len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
  1099. } else
  1100. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1101. aac_drivers[dev->cardtype].model);
  1102. return len;
  1103. }
  1104. static ssize_t aac_show_vendor(struct device *device,
  1105. struct device_attribute *attr, char *buf)
  1106. {
  1107. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1108. struct aac_supplement_adapter_info *sup_adap_info;
  1109. int len;
  1110. sup_adap_info = &dev->supplement_adapter_info;
  1111. if (sup_adap_info->adapter_type_text[0]) {
  1112. char *cp = sup_adap_info->adapter_type_text;
  1113. while (*cp && *cp != ' ')
  1114. ++cp;
  1115. len = snprintf(buf, PAGE_SIZE, "%.*s\n",
  1116. (int)(cp - (char *)sup_adap_info->adapter_type_text),
  1117. sup_adap_info->adapter_type_text);
  1118. } else
  1119. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1120. aac_drivers[dev->cardtype].vname);
  1121. return len;
  1122. }
/*
 * Sysfs "flags" attribute: one line per enabled driver/adapter feature
 * (debug printk, detailed status, 64-bit raw I/O, JBOD, power
 * management, MSI).
 */
static ssize_t aac_show_flags(struct device *cdev,
			      struct device_attribute *attr, char *buf)
{
	int len = 0;
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;

	/* nblank() is non-zero only when dprintk() expands to real output. */
	if (nblank(dprintk(x)))
		len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
	len += snprintf(buf + len, PAGE_SIZE - len,
			"AAC_DETAILED_STATUS_INFO\n");
#endif
	if (dev->raw_io_interface && dev->raw_io_64)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SAI_READ_CAPACITY_16\n");
	if (dev->jbod)
		len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
	if (dev->supplement_adapter_info.supported_options2 &
		AAC_OPTION_POWER_MANAGEMENT)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SUPPORTED_POWER_MANAGEMENT\n");
	if (dev->msi)
		len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
	return len;
}
  1147. static ssize_t aac_show_kernel_version(struct device *device,
  1148. struct device_attribute *attr,
  1149. char *buf)
  1150. {
  1151. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1152. int len, tmp;
  1153. tmp = le32_to_cpu(dev->adapter_info.kernelrev);
  1154. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1155. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1156. le32_to_cpu(dev->adapter_info.kernelbuild));
  1157. return len;
  1158. }
  1159. static ssize_t aac_show_monitor_version(struct device *device,
  1160. struct device_attribute *attr,
  1161. char *buf)
  1162. {
  1163. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1164. int len, tmp;
  1165. tmp = le32_to_cpu(dev->adapter_info.monitorrev);
  1166. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1167. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1168. le32_to_cpu(dev->adapter_info.monitorbuild));
  1169. return len;
  1170. }
  1171. static ssize_t aac_show_bios_version(struct device *device,
  1172. struct device_attribute *attr,
  1173. char *buf)
  1174. {
  1175. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1176. int len, tmp;
  1177. tmp = le32_to_cpu(dev->adapter_info.biosrev);
  1178. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1179. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1180. le32_to_cpu(dev->adapter_info.biosbuild));
  1181. return len;
  1182. }
/* Sysfs attribute: this driver's version string. */
static ssize_t aac_show_driver_version(struct device *device,
				       struct device_attribute *attr,
				       char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
}
/*
 * Sysfs "serial_number" attribute.  Prints the adapter serial (unless it
 * is the 0xBAD0 "invalid" sentinel); if the tail of the manufacturing
 * PCBA serial matches what was just printed, the full PCBA serial is
 * substituted.  Output is capped at 16 bytes.
 */
static ssize_t aac_show_serial_number(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len = 0;

	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
		len = snprintf(buf, 16, "%06X\n",
		  le32_to_cpu(dev->adapter_info.serial[0]));
	/*
	 * Compare the last (len-1) chars of mfg_pcba_serial_no against the
	 * serial just formatted (len-1 skips buf's trailing newline).
	 * NOTE(review): presumably the PCBA serial embeds the adapter
	 * serial as its suffix -- confirm against firmware docs.
	 */
	if (len &&
	  !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[
	    sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len],
	  buf, len-1))
		len = snprintf(buf, 16, "%.*s\n",
		  (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no),
		  dev->supplement_adapter_info.mfg_pcba_serial_no);

	return min(len, 16);
}
  1206. static ssize_t aac_show_max_channel(struct device *device,
  1207. struct device_attribute *attr, char *buf)
  1208. {
  1209. return snprintf(buf, PAGE_SIZE, "%d\n",
  1210. class_to_shost(device)->max_channel);
  1211. }
  1212. static ssize_t aac_show_max_id(struct device *device,
  1213. struct device_attribute *attr, char *buf)
  1214. {
  1215. return snprintf(buf, PAGE_SIZE, "%d\n",
  1216. class_to_shost(device)->max_id);
  1217. }
  1218. static ssize_t aac_store_reset_adapter(struct device *device,
  1219. struct device_attribute *attr,
  1220. const char *buf, size_t count)
  1221. {
  1222. int retval = -EACCES;
  1223. if (!capable(CAP_SYS_ADMIN))
  1224. return retval;
  1225. retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
  1226. buf[0] == '!', IOP_HWSOFT_RESET);
  1227. if (retval >= 0)
  1228. retval = count;
  1229. return retval;
  1230. }
  1231. static ssize_t aac_show_reset_adapter(struct device *device,
  1232. struct device_attribute *attr,
  1233. char *buf)
  1234. {
  1235. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1236. int len, tmp;
  1237. tmp = aac_adapter_check_health(dev);
  1238. if ((tmp == 0) && dev->in_reset)
  1239. tmp = -EBUSY;
  1240. len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
  1241. return len;
  1242. }
/* Host-level sysfs attributes, collected in aac_attrs[] below. */
static struct device_attribute aac_model = {
	.attr = {
		.name = "model",
		.mode = S_IRUGO,
	},
	.show = aac_show_model,
};
static struct device_attribute aac_vendor = {
	.attr = {
		.name = "vendor",
		.mode = S_IRUGO,
	},
	.show = aac_show_vendor,
};
static struct device_attribute aac_flags = {
	.attr = {
		.name = "flags",
		.mode = S_IRUGO,
	},
	.show = aac_show_flags,
};
static struct device_attribute aac_kernel_version = {
	.attr = {
		.name = "hba_kernel_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_kernel_version,
};
static struct device_attribute aac_monitor_version = {
	.attr = {
		.name = "hba_monitor_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_monitor_version,
};
static struct device_attribute aac_bios_version = {
	.attr = {
		.name = "hba_bios_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_bios_version,
};
static struct device_attribute aac_lld_version = {
	.attr = {
		.name = "driver_version",
		.mode = 0444,
	},
	.show = aac_show_driver_version,
};
static struct device_attribute aac_serial_number = {
	.attr = {
		.name = "serial_number",
		.mode = S_IRUGO,
	},
	.show = aac_show_serial_number,
};
static struct device_attribute aac_max_channel = {
	.attr = {
		.name = "max_channel",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_channel,
};
static struct device_attribute aac_max_id = {
	.attr = {
		.name = "max_id",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_id,
};
/* "reset_host" is the only writable (S_IWUSR) attribute. */
static struct device_attribute aac_reset = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR|S_IRUGO,
	},
	.store = aac_store_reset_adapter,
	.show = aac_show_reset_adapter,
};

/* NULL-terminated host attribute list, hooked into the host template. */
static struct device_attribute *aac_attrs[] = {
	&aac_model,
	&aac_vendor,
	&aac_flags,
	&aac_kernel_version,
	&aac_monitor_version,
	&aac_bios_version,
	&aac_lld_version,
	&aac_serial_number,
	&aac_max_channel,
	&aac_max_id,
	&aac_reset,
	NULL
};
/* Exported wrapper so other driver files can format the serial number. */
ssize_t aac_get_serial_number(struct device *device, char *buf)
{
	return aac_show_serial_number(device, &aac_serial_number, buf);
}
/* File operations for the "aac" control character device. */
static const struct file_operations aac_cfg_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= aac_compat_cfg_ioctl,
#endif
	.open		= aac_cfg_open,
	.llseek		= noop_llseek,
};
/* SCSI midlayer host template wiring up all the entry points above. */
static struct scsi_host_template aac_driver_template = {
	.module				= THIS_MODULE,
	.name				= "AAC",
	.proc_name			= AAC_DRIVERNAME,
	.info				= aac_info,
	.ioctl				= aac_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl			= aac_compat_ioctl,
#endif
	.queuecommand			= aac_queuecommand,
	.bios_param			= aac_biosparm,
	.shost_attrs			= aac_attrs,
	.slave_configure		= aac_slave_configure,
	.change_queue_depth		= aac_change_queue_depth,
	.sdev_attrs			= aac_dev_attrs,
	.eh_abort_handler		= aac_eh_abort,
	.eh_device_reset_handler	= aac_eh_dev_reset,
	.eh_target_reset_handler	= aac_eh_target_reset,
	.eh_bus_reset_handler		= aac_eh_bus_reset,
	.eh_host_reset_handler		= aac_eh_host_reset,
	.can_queue			= AAC_NUM_IO_FIB,
	.this_id			= MAXIMUM_NUM_CONTAINERS,
	.sg_tablesize			= 16,
	.max_sectors			= 128,
/* cmd_per_lun cannot exceed the number of I/O fibs. */
#if (AAC_NUM_IO_FIB > 256)
	.cmd_per_lun			= 256,
#else
	.cmd_per_lun			= AAC_NUM_IO_FIB,
#endif
	.use_clustering			= ENABLE_CLUSTERING,
	.emulated			= 1,
	.no_write_same			= 1,
};
  1381. static void __aac_shutdown(struct aac_dev * aac)
  1382. {
  1383. int i;
  1384. mutex_lock(&aac->ioctl_mutex);
  1385. aac->adapter_shutdown = 1;
  1386. mutex_unlock(&aac->ioctl_mutex);
  1387. if (aac->aif_thread) {
  1388. int i;
  1389. /* Clear out events first */
  1390. for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
  1391. struct fib *fib = &aac->fibs[i];
  1392. if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
  1393. (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
  1394. up(&fib->event_wait);
  1395. }
  1396. kthread_stop(aac->thread);
  1397. aac->thread = NULL;
  1398. }
  1399. aac_send_shutdown(aac);
  1400. aac_adapter_disable_int(aac);
  1401. if (aac_is_src(aac)) {
  1402. if (aac->max_msix > 1) {
  1403. for (i = 0; i < aac->max_msix; i++) {
  1404. free_irq(pci_irq_vector(aac->pdev, i),
  1405. &(aac->aac_msix[i]));
  1406. }
  1407. } else {
  1408. free_irq(aac->pdev->irq,
  1409. &(aac->aac_msix[0]));
  1410. }
  1411. } else {
  1412. free_irq(aac->pdev->irq, aac);
  1413. }
  1414. if (aac->msi)
  1415. pci_disable_msi(aac->pdev);
  1416. else if (aac->max_msix > 1)
  1417. pci_disable_msix(aac->pdev);
  1418. }
  1419. static void aac_init_char(void)
  1420. {
  1421. aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
  1422. if (aac_cfg_major < 0) {
  1423. pr_err("aacraid: unable to register \"aac\" device.\n");
  1424. }
  1425. }
/*
 * aac_probe_one - PCI probe callback: bring up one aacraid adapter.
 * @pdev: PCI device being probed
 * @id:   matching table entry; id->driver_data indexes aac_drivers[]
 *
 * Allocates the Scsi_Host, configures DMA masks, runs the card-specific
 * init routine, starts the command thread, queries the firmware for
 * capabilities and containers, and registers the host with the SCSI
 * mid-layer.  Returns 0 on success or a negative errno; on failure the
 * goto chain unwinds exactly what was set up.
 */
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned index = id->driver_data;
	struct Scsi_Host *shost;
	struct aac_dev *aac;
	struct list_head *insert = &aac_devices;
	int error;
	int unique_id = 0;
	u64 dmamask;
	int mask_bits = 0;
	extern int aac_sync_mode;

	/*
	 * Only series 7 needs freset.
	 */
	if (pdev->device == PMC_DEVICE_S7)
		pdev->needs_freset = 1;

	/* Find the first unused id, keeping aac_devices sorted by id. */
	list_for_each_entry(aac, &aac_devices, entry) {
		if (aac->id > unique_id)
			break;
		insert = &aac->entry;
		unique_id++;
	}

	/* Keep PCIe link power management from interfering with the HBA. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	error = pci_enable_device(pdev);
	if (error)
		goto out;

	/* Non-SRC adapters are limited to 32-bit streaming DMA. */
	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (error) {
			dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
			goto out_disable_pdev;
		}
	}

	/*
	 * If the quirk31 bit is set, the adapter needs adapter
	 * to driver communication memory to be allocated below 2gig
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
		dmamask = DMA_BIT_MASK(31);
		mask_bits = 31;
	} else {
		dmamask = DMA_BIT_MASK(32);
		mask_bits = 32;
	}

	error = pci_set_consistent_dma_mask(pdev, dmamask);
	if (error) {
		dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
			, mask_bits);
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
	if (!shost) {
		error = -ENOMEM;
		goto out_disable_pdev;
	}

	shost->irq = pdev->irq;
	shost->unique_id = unique_id;
	shost->max_cmd_len = 16;
	shost->use_cmd_list = 1;

	/* Re-register the control chardev if a prior remove released it. */
	if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
		aac_init_char();

	aac = (struct aac_dev *)shost->hostdata;
	aac->base_start = pci_resource_start(pdev, 0);
	aac->scsi_host_ptr = shost;
	aac->pdev = pdev;
	aac->name = aac_driver_template.name;
	aac->id = shost->unique_id;
	aac->cardtype = index;
	INIT_LIST_HEAD(&aac->entry);

	/* honor both the driver's and the kernel's reset_devices options */
	if (aac_reset_devices || reset_devices)
		aac->init_reset = true;

	/* One fib per possible outstanding I/O plus the management fibs. */
	aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
			    sizeof(struct fib),
			    GFP_KERNEL);
	if (!aac->fibs) {
		error = -ENOMEM;
		goto out_free_host;
	}

	spin_lock_init(&aac->fib_lock);
	mutex_init(&aac->ioctl_mutex);
	mutex_init(&aac->scan_mutex);
	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);

	/*
	 * Map in the registers from the adapter.
	 */
	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
	if ((*aac_drivers[index].init)(aac)) {
		error = -ENODEV;
		goto out_unmap;
	}

	if (aac->sync_mode) {
		if (aac_sync_mode)
			printk(KERN_INFO "%s%d: Sync. mode enforced "
				"by driver parameter. This will cause "
				"a significant performance decrease!\n",
				aac->name,
				aac->id);
		else
			printk(KERN_INFO "%s%d: Async. mode not supported "
				"by current driver, sync. mode enforced."
				"\nPlease update driver to get full performance.\n",
				aac->name,
				aac->id);
	}

	/*
	 * Start any kernel threads needed
	 */
	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
	if (IS_ERR(aac->thread)) {
		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
		error = PTR_ERR(aac->thread);
		aac->thread = NULL;
		goto out_deinit;
	}

	aac->maximum_num_channels = aac_drivers[index].channels;
	error = aac_get_adapter_info(aac);
	if (error < 0)
		goto out_deinit;

	/*
	 * Lets override negotiations and drop the maximum SG limit to 34
	 */
	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
	    (shost->sg_tablesize > 34)) {
		shost->sg_tablesize = 34;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}

	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
	    (shost->sg_tablesize > 17)) {
		shost->sg_tablesize = 17;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}

	/* New-comm firmware handles full-size segments; old comm gets 64K. */
	error = pci_set_dma_max_seg_size(pdev,
		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
			(shost->max_sectors << 9) : 65536);
	if (error)
		goto out_deinit;

	/*
	 * Firmware printf works only with older firmware.
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
		aac->printf_enabled = 1;
	else
		aac->printf_enabled = 0;

	/*
	 * max channel will be the physical channels plus 1 virtual channel
	 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
	 * physical channels are address by their actual physical number+1
	 */
	if (aac->nondasd_support || expose_physicals || aac->jbod)
		shost->max_channel = aac->maximum_num_channels;
	else
		shost->max_channel = 0;

	aac_get_config_status(aac, 0);
	aac_get_containers(aac);
	list_add(&aac->entry, insert);

	shost->max_id = aac->maximum_num_containers;
	if (shost->max_id < aac->maximum_num_physicals)
		shost->max_id = aac->maximum_num_physicals;
	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
		shost->max_id = MAXIMUM_NUM_CONTAINERS;
	else
		shost->this_id = shost->max_id;

	if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
		aac_intr_normal(aac, 0, 2, 0, NULL);

	/*
	 * dmb - we may need to move the setting of these parms somewhere else once
	 * we get a fib that can report the actual numbers
	 */
	shost->max_lun = AAC_MAX_LUN;

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_deinit;

	aac_scan_host(aac);

	pci_enable_pcie_error_reporting(pdev);
	/* saved state is restored during EEH recovery */
	pci_save_state(pdev);

	return 0;

 out_deinit:
	__aac_shutdown(aac);
 out_unmap:
	aac_fib_map_free(aac);
	if (aac->comm_addr)
		dma_free_coherent(&aac->pdev->dev, aac->comm_size,
				  aac->comm_addr, aac->comm_phys);
	kfree(aac->queues);
	aac_adapter_ioremap(aac, 0);
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
 out_free_host:
	scsi_host_put(shost);
 out_disable_pdev:
	pci_disable_device(pdev);
 out:
	return error;
}
/*
 * aac_release_resources - silence interrupt delivery for suspend/EEH.
 * @aac: adapter whose interrupts should be quiesced
 *
 * Masks adapter interrupts, then releases the IRQ line(s); the
 * counterpart of aac_acquire_resources().
 */
static void aac_release_resources(struct aac_dev *aac)
{
	aac_adapter_disable_int(aac);
	aac_free_irq(aac);
}
/*
 * aac_acquire_resources - re-arm an adapter after resume or EEH reset.
 * @dev: adapter being brought back up
 *
 * Waits for the firmware to report KERNEL_UP_AND_RUNNING, reconfigures
 * the interrupt mode, re-requests IRQs, re-assigns fib vectors, and
 * restarts the adapter unless it is stuck in sync mode.
 *
 * Return: 0 on success, -1 if the IRQs could not be acquired.
 */
static int aac_acquire_resources(struct aac_dev *dev)
{
	unsigned long status;
	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	/* An all-ones read means the register isn't accessible yet
	 * (typical immediately after an EEH reset) — keep polling. */
	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
		|| status == 0xffffffff)
		msleep(20);

	aac_adapter_disable_int(dev);
	aac_adapter_enable_int(dev);

	if (aac_is_src(dev))
		aac_define_int_mode(dev);

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	aac_adapter_enable_int(dev);

	/*max msix may change after EEH
	 * Re-assign vectors to fibs
	 */
	aac_fib_vector_assign(dev);

	if (!dev->sync_mode) {
		/* After EEH recovery or suspend resume, max_msix count
		 * may change, therefore updating in init as well.
		 */
		dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
		aac_adapter_start(dev);
	}
	return 0;

error_iounmap:
	return -1;
}
#if (defined(CONFIG_PM))
/*
 * aac_suspend - legacy PCI power-management suspend hook.
 * @pdev:  PCI device being suspended
 * @state: target power state
 *
 * Blocks SCSI requests, cancels the rescan worker, tells the firmware
 * to shut down, releases IRQs, then saves config space and powers the
 * device down.  Always returns 0.
 */
static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_block_requests(shost);
	aac_cancel_safw_rescan_worker(aac);
	aac_send_shutdown(aac);

	aac_release_resources(aac);

	pci_set_drvdata(pdev, shost);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

/*
 * aac_resume - legacy PCI power-management resume hook.
 * @pdev: PCI device being resumed
 *
 * Powers the device back up, restores config space, and re-acquires
 * adapter resources before unblocking SCSI requests.  Returns 0 on
 * success or -ENODEV if the device could not be revived.
 */
static int aac_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
	int r;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	r = pci_enable_device(pdev);

	if (r)
		goto fail_device;

	pci_set_master(pdev);
	if (aac_acquire_resources(aac))
		goto fail_device;
	/*
	* reset this flag to unblock ioctl() as it was set at
	* aac_send_shutdown() to block ioctls from upperlayer
	*/
	aac->adapter_shutdown = 0;
	scsi_unblock_requests(shost);

	return 0;

fail_device:
	printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
	/* NOTE(review): this put drops a reference the resume path never
	 * took; presumably intentional to force teardown — verify it
	 * cannot cause a double release on a later remove. */
	scsi_host_put(shost);
	pci_disable_device(pdev);
	return -ENODEV;
}
#endif
  1705. static void aac_shutdown(struct pci_dev *dev)
  1706. {
  1707. struct Scsi_Host *shost = pci_get_drvdata(dev);
  1708. scsi_block_requests(shost);
  1709. __aac_shutdown((struct aac_dev *)shost->hostdata);
  1710. }
/*
 * aac_remove_one - PCI remove callback: tear down one adapter.
 * @pdev: PCI device being removed
 *
 * Unwinds everything aac_probe_one() set up, in reverse order: cancel
 * the rescan worker, detach from the SCSI mid-layer, shut the adapter
 * down, free DMA/fib/container memory, unmap registers, and drop the
 * host reference.  The control chardev is released once the last
 * adapter is gone, and aac_cfg_major is flagged for re-registration.
 */
static void aac_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	aac_cancel_safw_rescan_worker(aac);
	scsi_remove_host(shost);

	__aac_shutdown(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	kfree(aac->queues);

	aac_adapter_ioremap(aac, 0);

	kfree(aac->fibs);
	kfree(aac->fsa_dev);

	list_del(&aac->entry);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	if (list_empty(&aac_devices)) {
		unregister_chrdev(aac_cfg_major, "aac");
		aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
	}
}
  1733. static void aac_flush_ios(struct aac_dev *aac)
  1734. {
  1735. int i;
  1736. struct scsi_cmnd *cmd;
  1737. for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
  1738. cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
  1739. if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
  1740. scsi_dma_unmap(cmd);
  1741. if (aac->handle_pci_error)
  1742. cmd->result = DID_NO_CONNECT << 16;
  1743. else
  1744. cmd->result = DID_RESET << 16;
  1745. cmd->scsi_done(cmd);
  1746. }
  1747. }
  1748. }
  1749. static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
  1750. enum pci_channel_state error)
  1751. {
  1752. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1753. struct aac_dev *aac = shost_priv(shost);
  1754. dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);
  1755. switch (error) {
  1756. case pci_channel_io_normal:
  1757. return PCI_ERS_RESULT_CAN_RECOVER;
  1758. case pci_channel_io_frozen:
  1759. aac->handle_pci_error = 1;
  1760. scsi_block_requests(aac->scsi_host_ptr);
  1761. aac_cancel_safw_rescan_worker(aac);
  1762. aac_flush_ios(aac);
  1763. aac_release_resources(aac);
  1764. pci_disable_pcie_error_reporting(pdev);
  1765. aac_adapter_ioremap(aac, 0);
  1766. return PCI_ERS_RESULT_NEED_RESET;
  1767. case pci_channel_io_perm_failure:
  1768. aac->handle_pci_error = 1;
  1769. aac_flush_ios(aac);
  1770. return PCI_ERS_RESULT_DISCONNECT;
  1771. }
  1772. return PCI_ERS_RESULT_NEED_RESET;
  1773. }
/*
 * aac_pci_mmio_enabled - AER callback once MMIO access is restored.
 *
 * The driver performs no recovery at this stage; it always escalates
 * to a slot reset.
 */
static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * aac_pci_slot_reset - AER/EEH callback after the PCI slot was reset.
 *
 * Restores saved config space and re-enables the device so that
 * aac_pci_resume() can complete recovery.  Returns RECOVERED on
 * success, DISCONNECT if the device cannot be re-enabled.
 */
static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		dev_warn(&pdev->dev,
			 "aacraid: failed to enable slave\n");
		goto fail_device;
	}

	pci_set_master(pdev);

	/* NOTE(review): pci_enable_device_mem() right after a successful
	 * pci_enable_device() looks redundant — confirm it is intended. */
	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
		goto fail_device;
	}

	return PCI_ERS_RESULT_RECOVERED;

fail_device:
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
/*
 * aac_pci_resume - AER/EEH callback: finish recovery after slot reset.
 *
 * Remaps adapter registers (falling back to the producer comm interface
 * and minimal footprint if the full remap fails), waits for the
 * firmware to settle, re-acquires resources, brings offlined devices
 * back online, and rescans the host.
 */
static void aac_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct scsi_device *sdev = NULL;
	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);

	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (aac_adapter_ioremap(aac, aac->base_size)) {
		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
		/* remap failed, go back ... */
		aac->comm_interface = AAC_COMM_PRODUCER;
		if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
			dev_warn(&pdev->dev,
				 "aacraid: unable to map adapter.\n");

			return;
		}
	}

	/* give the firmware time to come up after the reset */
	msleep(10000);

	aac_acquire_resources(aac);

	/*
	 * reset this flag to unblock ioctl() as it was set
	 * at aac_send_shutdown() to block ioctls from upperlayer
	 */
	aac->adapter_shutdown = 0;
	aac->handle_pci_error = 0;

	/* bring devices offlined by the error back online */
	shost_for_each_device(sdev, shost)
		if (sdev->sdev_state == SDEV_OFFLINE)
			sdev->sdev_state = SDEV_RUNNING;
	scsi_unblock_requests(aac->scsi_host_ptr);
	aac_scan_host(aac);
	pci_save_state(pdev);

	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
/* PCI error-recovery (AER/EEH) callbacks for this driver. */
static struct pci_error_handlers aac_pci_err_handler = {
	.error_detected = aac_pci_error_detected,
	.mmio_enabled = aac_pci_mmio_enabled,
	.slot_reset = aac_pci_slot_reset,
	.resume = aac_pci_resume,
};
/* PCI driver registration: probe/remove, legacy PM, shutdown, AER. */
static struct pci_driver aac_pci_driver = {
	.name = AAC_DRIVERNAME,
	.id_table = aac_pci_tbl,
	.probe = aac_probe_one,
	.remove = aac_remove_one,
#if (defined(CONFIG_PM))
	.suspend = aac_suspend,
	.resume = aac_resume,
#endif
	.shutdown = aac_shutdown,
	.err_handler = &aac_pci_err_handler,
};
  1848. static int __init aac_init(void)
  1849. {
  1850. int error;
  1851. printk(KERN_INFO "Adaptec %s driver %s\n",
  1852. AAC_DRIVERNAME, aac_driver_version);
  1853. error = pci_register_driver(&aac_pci_driver);
  1854. if (error < 0)
  1855. return error;
  1856. aac_init_char();
  1857. return 0;
  1858. }
  1859. static void __exit aac_exit(void)
  1860. {
  1861. if (aac_cfg_major > -1)
  1862. unregister_chrdev(aac_cfg_major, "aac");
  1863. pci_unregister_driver(&aac_pci_driver);
  1864. }
/* Register module load/unload entry points with the kernel. */
module_init(aac_init);
module_exit(aac_exit);