linit.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Adaptec AAC series RAID controller driver
  4. * (c) Copyright 2001 Red Hat Inc.
  5. *
  6. * based on the old aacraid driver that is..
  7. * Adaptec aacraid device driver for Linux.
  8. *
  9. * Copyright (c) 2000-2010 Adaptec, Inc.
  10. * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  11. * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  12. *
  13. * Module Name:
  14. * linit.c
  15. *
  16. * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
  17. */
  18. #include <linux/compat.h>
  19. #include <linux/blkdev.h>
  20. #include <linux/completion.h>
  21. #include <linux/init.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/kernel.h>
  24. #include <linux/module.h>
  25. #include <linux/moduleparam.h>
  26. #include <linux/pci.h>
  27. #include <linux/slab.h>
  28. #include <linux/mutex.h>
  29. #include <linux/spinlock.h>
  30. #include <linux/syscalls.h>
  31. #include <linux/delay.h>
  32. #include <linux/kthread.h>
  33. #include <linux/msdos_partition.h>
  34. #include <scsi/scsi.h>
  35. #include <scsi/scsi_cmnd.h>
  36. #include <scsi/scsi_device.h>
  37. #include <scsi/scsi_host.h>
  38. #include <scsi/scsi_tcq.h>
  39. #include <scsi/scsicam.h>
  40. #include <scsi/scsi_eh.h>
  41. #include "aacraid.h"
  42. #define AAC_DRIVER_VERSION "1.2.1"
  43. #ifndef AAC_DRIVER_BRANCH
  44. #define AAC_DRIVER_BRANCH ""
  45. #endif
  46. #define AAC_DRIVERNAME "aacraid"
  47. #ifdef AAC_DRIVER_BUILD
  48. #define _str(x) #x
  49. #define str(x) _str(x)
  50. #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
  51. #else
  52. #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
  53. #endif
  54. MODULE_AUTHOR("Red Hat Inc and Adaptec");
  55. MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
  56. "Adaptec Advanced Raid Products, "
  57. "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
  58. MODULE_LICENSE("GPL");
  59. MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
  60. static DEFINE_MUTEX(aac_mutex);
  61. static LIST_HEAD(aac_devices);
  62. static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
  63. char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
  64. /*
  65. * Because of the way Linux names scsi devices, the order in this table has
  66. * become important. Check for on-board Raid first, add-in cards second.
  67. *
  68. * Note: The last field is used to index into aac_drivers below.
  69. */
  70. static const struct pci_device_id aac_pci_tbl[] = {
  71. { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
  72. { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
  73. { 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
  74. { 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
  75. { 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
  76. { 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
  77. { 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
  78. { 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
  79. { 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
  80. { 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
  81. { 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
  82. { 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
  83. { 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
  84. { 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
  85. { 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
  86. { 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
  87. { 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
  88. { 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
  89. { 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
  90. { 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
  91. { 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
  92. { 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
  93. { 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
  94. { 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
  95. { 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
  96. { 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
  97. { 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
  98. { 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
  99. { 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
  100. { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
  101. { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
  102. { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
  103. { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
  104. { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
  105. { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
  106. { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
  107. { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
  108. { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
  109. { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
  110. { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
  111. { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
  112. { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
  113. { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
  114. { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
  115. { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
  116. { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
  117. { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
  118. { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
  119. { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
  120. { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
  121. { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
  122. { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
  123. { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
  124. { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
  125. { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
  126. { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
  127. { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
  128. { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
  129. { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
  130. { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
  131. { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
  132. { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
  133. { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
  134. { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
  135. { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
  136. { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
  137. { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
  138. { 0,}
  139. };
  140. MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
  141. /*
  142. * dmb - For now we add the number of channels to this structure.
  143. * In the future we should add a fib that reports the number of channels
  144. * for the card. At that time we can remove the channels from here
  145. */
  146. static struct aac_driver_ident aac_drivers[] = {
  147. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
  148. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
  149. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
  150. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
  151. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
  152. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
  153. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
  154. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
  155. { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
  156. { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
  157. { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
  158. { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
  159. { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
  160. { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
  161. { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
  162. { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
  163. { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
  164. { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
  165. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
  166. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
  167. { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
  168. { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
  169. { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
  170. { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
  171. { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
  172. { aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */
  173. { aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */
  174. { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
  175. { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
  176. { aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */
  177. { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
  178. { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
  179. { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
  180. { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
  181. { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
  182. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
  183. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
  184. { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
  185. { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
  186. { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
  187. { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
  188. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
  189. { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
  190. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
  191. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */
  192. { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
  193. { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
  194. { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
  195. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
  196. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
  197. { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
  198. { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */
  199. { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
  200. { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
  201. { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
  202. { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
  203. { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
  204. { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
  205. { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
  206. { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
  207. { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
  208. { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
  209. { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
  210. { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
  211. { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
  212. };
  213. /**
  214. * aac_queuecommand - queue a SCSI command
  215. * @shost: Scsi host to queue command on
  216. * @cmd: SCSI command to queue
  217. *
  218. * Queues a command for execution by the associated Host Adapter.
  219. *
  220. * TODO: unify with aac_scsi_cmd().
  221. */
  222. static int aac_queuecommand(struct Scsi_Host *shost,
  223. struct scsi_cmnd *cmd)
  224. {
  225. aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
  226. return aac_scsi_cmd(cmd) ? FAILED : 0;
  227. }
  228. /**
  229. * aac_info - Returns the host adapter name
  230. * @shost: Scsi host to report on
  231. *
  232. * Returns a static string describing the device in question
  233. */
  234. static const char *aac_info(struct Scsi_Host *shost)
  235. {
  236. struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
  237. return aac_drivers[dev->cardtype].name;
  238. }
  239. /**
  240. * aac_get_driver_ident
  241. * @devtype: index into lookup table
  242. *
  243. * Returns a pointer to the entry in the driver lookup table.
  244. */
  245. struct aac_driver_ident* aac_get_driver_ident(int devtype)
  246. {
  247. return &aac_drivers[devtype];
  248. }
  249. /**
  250. * aac_biosparm - return BIOS parameters for disk
  251. * @sdev: The scsi device corresponding to the disk
  252. * @bdev: the block device corresponding to the disk
  253. * @capacity: the sector capacity of the disk
  254. * @geom: geometry block to fill in
  255. *
  256. * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
  257. * The default disk geometry is 64 heads, 32 sectors, and the appropriate
  258. * number of cylinders so as not to exceed drive capacity. In order for
  259. * disks equal to or larger than 1 GB to be addressable by the BIOS
  260. * without exceeding the BIOS limitation of 1024 cylinders, Extended
  261. * Translation should be enabled. With Extended Translation enabled,
  262. * drives between 1 GB inclusive and 2 GB exclusive are given a disk
  263. * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
  264. * are given a disk geometry of 255 heads and 63 sectors. However, if
  265. * the BIOS detects that the Extended Translation setting does not match
  266. * the geometry in the partition table, then the translation inferred
  267. * from the partition table will be used by the BIOS, and a warning may
  268. * be displayed.
  269. */
  270. static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
  271. sector_t capacity, int *geom)
  272. {
  273. struct diskparm *param = (struct diskparm *)geom;
  274. unsigned char *buf;
  275. dprintk((KERN_DEBUG "aac_biosparm.\n"));
  276. /*
  277. * Assuming extended translation is enabled - #REVISIT#
  278. */
  279. if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
  280. if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
  281. param->heads = 255;
  282. param->sectors = 63;
  283. } else {
  284. param->heads = 128;
  285. param->sectors = 32;
  286. }
  287. } else {
  288. param->heads = 64;
  289. param->sectors = 32;
  290. }
  291. param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
  292. /*
  293. * Read the first 1024 bytes from the disk device, if the boot
  294. * sector partition table is valid, search for a partition table
  295. * entry whose end_head matches one of the standard geometry
  296. * translations ( 64/32, 128/32, 255/63 ).
  297. */
  298. buf = scsi_bios_ptable(bdev);
  299. if (!buf)
  300. return 0;
  301. if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) {
  302. struct msdos_partition *first = (struct msdos_partition *)buf;
  303. struct msdos_partition *entry = first;
  304. int saved_cylinders = param->cylinders;
  305. int num;
  306. unsigned char end_head, end_sec;
  307. for(num = 0; num < 4; num++) {
  308. end_head = entry->end_head;
  309. end_sec = entry->end_sector & 0x3f;
  310. if(end_head == 63) {
  311. param->heads = 64;
  312. param->sectors = 32;
  313. break;
  314. } else if(end_head == 127) {
  315. param->heads = 128;
  316. param->sectors = 32;
  317. break;
  318. } else if(end_head == 254) {
  319. param->heads = 255;
  320. param->sectors = 63;
  321. break;
  322. }
  323. entry++;
  324. }
  325. if (num == 4) {
  326. end_head = first->end_head;
  327. end_sec = first->end_sector & 0x3f;
  328. }
  329. param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
  330. if (num < 4 && end_sec == param->sectors) {
  331. if (param->cylinders != saved_cylinders) {
  332. dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
  333. param->heads, param->sectors, num));
  334. }
  335. } else if (end_head > 0 || end_sec > 0) {
  336. dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
  337. end_head + 1, end_sec, num));
  338. dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
  339. param->heads, param->sectors));
  340. }
  341. }
  342. kfree(buf);
  343. return 0;
  344. }
  345. /**
  346. * aac_slave_configure - compute queue depths
  347. * @sdev: SCSI device we are considering
  348. *
  349. * Selects queue depths for each target device based on the host adapter's
  350. * total capacity and the queue depth supported by the target device.
  351. * A queue depth of one automatically disables tagged queueing.
  352. */
  353. static int aac_slave_configure(struct scsi_device *sdev)
  354. {
  355. struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
  356. int chn, tid;
  357. unsigned int depth = 0;
  358. unsigned int set_timeout = 0;
  359. int timeout = 0;
  360. bool set_qd_dev_type = false;
  361. u8 devtype = 0;
  362. chn = aac_logical_to_phys(sdev_channel(sdev));
  363. tid = sdev_id(sdev);
  364. if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
  365. devtype = aac->hba_map[chn][tid].devtype;
  366. if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
  367. depth = aac->hba_map[chn][tid].qd_limit;
  368. set_timeout = 1;
  369. goto common_config;
  370. }
  371. if (devtype == AAC_DEVTYPE_ARC_RAW) {
  372. set_qd_dev_type = true;
  373. set_timeout = 1;
  374. goto common_config;
  375. }
  376. }
  377. if (aac->jbod && (sdev->type == TYPE_DISK))
  378. sdev->removable = 1;
  379. if (sdev->type == TYPE_DISK
  380. && sdev_channel(sdev) != CONTAINER_CHANNEL
  381. && (!aac->jbod || sdev->inq_periph_qual)
  382. && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
  383. if (expose_physicals == 0)
  384. return -ENXIO;
  385. if (expose_physicals < 0)
  386. sdev->no_uld_attach = 1;
  387. }
  388. if (sdev->tagged_supported
  389. && sdev->type == TYPE_DISK
  390. && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
  391. && !sdev->no_uld_attach) {
  392. struct scsi_device * dev;
  393. struct Scsi_Host *host = sdev->host;
  394. unsigned num_lsu = 0;
  395. unsigned num_one = 0;
  396. unsigned cid;
  397. set_timeout = 1;
  398. for (cid = 0; cid < aac->maximum_num_containers; ++cid)
  399. if (aac->fsa_dev[cid].valid)
  400. ++num_lsu;
  401. __shost_for_each_device(dev, host) {
  402. if (dev->tagged_supported
  403. && dev->type == TYPE_DISK
  404. && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
  405. && !dev->no_uld_attach) {
  406. if ((sdev_channel(dev) != CONTAINER_CHANNEL)
  407. || !aac->fsa_dev[sdev_id(dev)].valid) {
  408. ++num_lsu;
  409. }
  410. } else {
  411. ++num_one;
  412. }
  413. }
  414. if (num_lsu == 0)
  415. ++num_lsu;
  416. depth = (host->can_queue - num_one) / num_lsu;
  417. if (sdev_channel(sdev) != NATIVE_CHANNEL)
  418. goto common_config;
  419. set_qd_dev_type = true;
  420. }
  421. common_config:
  422. /*
  423. * Check if SATA drive
  424. */
  425. if (set_qd_dev_type) {
  426. if (strncmp(sdev->vendor, "ATA", 3) == 0)
  427. depth = 32;
  428. else
  429. depth = 64;
  430. }
  431. /*
  432. * Firmware has an individual device recovery time typically
  433. * of 35 seconds, give us a margin. Thor devices can take longer in
  434. * error recovery, hence different value.
  435. */
  436. if (set_timeout) {
  437. timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
  438. blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
  439. }
  440. if (depth > 256)
  441. depth = 256;
  442. else if (depth < 1)
  443. depth = 1;
  444. scsi_change_queue_depth(sdev, depth);
  445. sdev->tagged_supported = 1;
  446. return 0;
  447. }
  448. /**
  449. * aac_change_queue_depth - alter queue depths
  450. * @sdev: SCSI device we are considering
  451. * @depth: desired queue depth
  452. *
  453. * Alters queue depths for target device based on the host adapter's
  454. * total capacity and the queue depth supported by the target device.
  455. */
  456. static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
  457. {
  458. struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
  459. int chn, tid, is_native_device = 0;
  460. chn = aac_logical_to_phys(sdev_channel(sdev));
  461. tid = sdev_id(sdev);
  462. if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
  463. aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
  464. is_native_device = 1;
  465. if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
  466. (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
  467. struct scsi_device * dev;
  468. struct Scsi_Host *host = sdev->host;
  469. unsigned num = 0;
  470. __shost_for_each_device(dev, host) {
  471. if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
  472. (sdev_channel(dev) == CONTAINER_CHANNEL))
  473. ++num;
  474. ++num;
  475. }
  476. if (num >= host->can_queue)
  477. num = host->can_queue - 1;
  478. if (depth > (host->can_queue - num))
  479. depth = host->can_queue - num;
  480. if (depth > 256)
  481. depth = 256;
  482. else if (depth < 2)
  483. depth = 2;
  484. return scsi_change_queue_depth(sdev, depth);
  485. } else if (is_native_device) {
  486. scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
  487. } else {
  488. scsi_change_queue_depth(sdev, 1);
  489. }
  490. return sdev->queue_depth;
  491. }
  492. static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
  493. {
  494. struct scsi_device *sdev = to_scsi_device(dev);
  495. struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
  496. if (sdev_channel(sdev) != CONTAINER_CHANNEL)
  497. return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
  498. ? "Hidden\n" :
  499. ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
  500. return snprintf(buf, PAGE_SIZE, "%s\n",
  501. get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
  502. }
/* Per-device sysfs attribute "level": read-only RAID level / label. */
static struct device_attribute aac_raid_level_attr = {
	.attr = {
		.name = "level",
		.mode = S_IRUGO,
	},
	.show = aac_show_raid_level
};
  510. static ssize_t aac_show_unique_id(struct device *dev,
  511. struct device_attribute *attr, char *buf)
  512. {
  513. struct scsi_device *sdev = to_scsi_device(dev);
  514. struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
  515. unsigned char sn[16];
  516. memset(sn, 0, sizeof(sn));
  517. if (sdev_channel(sdev) == CONTAINER_CHANNEL)
  518. memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
  519. return snprintf(buf, 16 * 2 + 2,
  520. "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
  521. sn[0], sn[1], sn[2], sn[3],
  522. sn[4], sn[5], sn[6], sn[7],
  523. sn[8], sn[9], sn[10], sn[11],
  524. sn[12], sn[13], sn[14], sn[15]);
  525. }
/* Per-device sysfs attribute "unique_id": read-only container identifier. */
static struct device_attribute aac_unique_id_attr = {
	.attr = {
		.name = "unique_id",
		.mode = 0444,
	},
	.show = aac_show_unique_id
};

/* All per-device sysfs attributes, wired up via sdev_groups. */
static struct attribute *aac_dev_attrs[] = {
	&aac_raid_level_attr.attr,
	&aac_unique_id_attr.attr,
	NULL,
};

ATTRIBUTE_GROUPS(aac_dev);
  539. static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
  540. void __user *arg)
  541. {
  542. int retval;
  543. struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
  544. if (!capable(CAP_SYS_RAWIO))
  545. return -EPERM;
  546. retval = aac_adapter_check_health(dev);
  547. if (retval)
  548. return -EBUSY;
  549. return aac_do_ioctl(dev, cmd, arg);
  550. }
/* Per-owner tallies of outstanding fibs, filled in by fib_count_iter(). */
struct fib_count_data {
	int mlcnt;	/* owned by the SCSI midlevel */
	int llcnt;	/* owned by this lowlevel driver */
	int ehcnt;	/* owned by the error handler */
	int fwcnt;	/* outstanding in firmware */
	int krlcnt;	/* any other (kernel) owner */
};
  558. static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data)
  559. {
  560. struct fib_count_data *fib_count = data;
  561. switch (aac_priv(scmnd)->owner) {
  562. case AAC_OWNER_FIRMWARE:
  563. fib_count->fwcnt++;
  564. break;
  565. case AAC_OWNER_ERROR_HANDLER:
  566. fib_count->ehcnt++;
  567. break;
  568. case AAC_OWNER_LOWLEVEL:
  569. fib_count->llcnt++;
  570. break;
  571. case AAC_OWNER_MIDLEVEL:
  572. fib_count->mlcnt++;
  573. break;
  574. default:
  575. fib_count->krlcnt++;
  576. break;
  577. }
  578. return true;
  579. }
  580. /* Called during SCSI EH, so we don't need to block requests */
  581. static int get_num_of_incomplete_fibs(struct aac_dev *aac)
  582. {
  583. struct Scsi_Host *shost = aac->scsi_host_ptr;
  584. struct device *ctrl_dev;
  585. struct fib_count_data fcnt = { };
  586. scsi_host_busy_iter(shost, fib_count_iter, &fcnt);
  587. ctrl_dev = &aac->pdev->dev;
  588. dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", fcnt.mlcnt);
  589. dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", fcnt.llcnt);
  590. dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", fcnt.ehcnt);
  591. dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fcnt.fwcnt);
  592. dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", fcnt.krlcnt);
  593. return fcnt.mlcnt + fcnt.llcnt + fcnt.ehcnt + fcnt.fwcnt;
  594. }
/*
 * aac_eh_abort - abort a single timed-out SCSI command.
 *
 * For native raw devices a HBA_TMF_ABORT_TASK task-management request is
 * sent to the firmware and we poll up to 15 s for its completion.  For
 * array/container devices the associated fibs are only marked TIMED_OUT
 * and handed to the error handler.  Returns SUCCESS or FAILED.
 */
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
	struct aac_cmd_priv *cmd_priv = aac_priv(cmd);
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	int count, found;
	u32 bus, cid;
	int ret = FAILED;

	/* A dead controller cannot service an abort. */
	if (aac_adapter_check_health(aac))
		return ret;

	bus = aac_logical_to_phys(scmd_channel(cmd));
	cid = scmd_id(cmd);
	if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
		struct fib *fib;
		struct aac_hba_tm_req *tmf;
		int status;
		u64 address;

		pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
		       AAC_DRIVERNAME,
		       host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);

		/* Find the outstanding native-HBA fib carrying this command;
		 * nothing to abort if it already completed. */
		found = 0;
		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
			fib = &aac->fibs[count];
			if (*(u8 *)fib->hw_fib_va != 0 &&
			    (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
			    (fib->callback_data == cmd)) {
				found = 1;
				break;
			}
		}
		if (!found)
			return ret;

		/* start a HBA_TMF_ABORT_TASK TMF request */
		fib = aac_fib_alloc(aac);
		if (!fib)
			return ret;

		tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
		memset(tmf, 0, sizeof(*tmf));
		tmf->tmf = HBA_TMF_ABORT_TASK;
		tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
		tmf->lun[1] = cmd->device->lun;

		/* Tell firmware where to deposit error status. */
		address = (u64)fib->hw_error_pa;
		tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		fib->hbacmd_size = sizeof(*tmf);
		/* aac_hba_callback sets sent_command when the TMF completes. */
		cmd_priv->sent_command = 0;

		status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
				      (fib_callback) aac_hba_callback,
				      (void *) cmd);
		if (status != -EINPROGRESS) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return ret;
		}
		/* Wait up to 15 secs for completion */
		for (count = 0; count < 15; ++count) {
			if (cmd_priv->sent_command) {
				ret = SUCCESS;
				break;
			}
			msleep(1000);
		}

		if (ret != SUCCESS)
			pr_err("%s: Host adapter abort request timed out\n",
			       AAC_DRIVERNAME);
	} else {
		pr_err(
			"%s: Host adapter abort request.\n"
			"%s: Outstanding commands on (%d,%d,%d,%d):\n",
			AAC_DRIVERNAME, AAC_DRIVERNAME,
			host->host_no, sdev_channel(dev), sdev_id(dev),
			(int)dev->lun);
		switch (cmd->cmnd[0]) {
		case SERVICE_ACTION_IN_16:
			/* Only abortable when it is a raw-io-64 READ CAPACITY(16). */
			if (!(aac->raw_io_interface) ||
			    !(aac->raw_io_64) ||
			    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
				break;
			fallthrough;
		case INQUIRY:
		case READ_CAPACITY:
			/*
			 * Mark associated FIB to not complete,
			 * eh handler does this
			 */
			for (count = 0;
			     count < (host->can_queue + AAC_NUM_MGT_FIB);
			     ++count) {
				struct fib *fib = &aac->fibs[count];
				if (fib->hw_fib_va->header.XferState &&
				    (fib->flags & FIB_CONTEXT_FLAG) &&
				    (fib->callback_data == cmd)) {
					fib->flags |=
						FIB_CONTEXT_FLAG_TIMED_OUT;
					cmd_priv->owner =
						AAC_OWNER_ERROR_HANDLER;
					ret = SUCCESS;
				}
			}
			break;
		case TEST_UNIT_READY:
			/*
			 * Mark associated FIB to not complete,
			 * eh handler does this
			 */
			for (count = 0;
			     count < (host->can_queue + AAC_NUM_MGT_FIB);
			     ++count) {
				struct scsi_cmnd *command;
				struct fib *fib = &aac->fibs[count];
				command = fib->callback_data;
				/* Match every async fib on the same device;
				 * only the aborted command yields SUCCESS. */
				if ((fib->hw_fib_va->header.XferState &
				     cpu_to_le32
				     (Async | NoResponseExpected)) &&
				    (fib->flags & FIB_CONTEXT_FLAG) &&
				    ((command)) &&
				    (command->device == cmd->device)) {
					fib->flags |=
						FIB_CONTEXT_FLAG_TIMED_OUT;
					aac_priv(command)->owner =
						AAC_OWNER_ERROR_HANDLER;
					if (command == cmd)
						ret = SUCCESS;
				}
			}
			break;
		}
	}
	return ret;
}
  727. static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info,
  728. struct fib *fib, u64 tmf_lun)
  729. {
  730. struct aac_hba_tm_req *tmf;
  731. u64 address;
  732. /* start a HBA_TMF_LUN_RESET TMF request */
  733. tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
  734. memset(tmf, 0, sizeof(*tmf));
  735. tmf->tmf = HBA_TMF_LUN_RESET;
  736. tmf->it_nexus = info->rmw_nexus;
  737. int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun);
  738. address = (u64)fib->hw_error_pa;
  739. tmf->error_ptr_hi = cpu_to_le32
  740. ((u32)(address >> 32));
  741. tmf->error_ptr_lo = cpu_to_le32
  742. ((u32)(address & 0xffffffff));
  743. tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  744. fib->hbacmd_size = sizeof(*tmf);
  745. return HBA_IU_TYPE_SCSI_TM_REQ;
  746. }
  747. static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
  748. struct fib *fib)
  749. {
  750. struct aac_hba_reset_req *rst;
  751. u64 address;
  752. /* already tried, start a hard reset now */
  753. rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
  754. memset(rst, 0, sizeof(*rst));
  755. rst->it_nexus = info->rmw_nexus;
  756. address = (u64)fib->hw_error_pa;
  757. rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
  758. rst->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
  759. rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  760. fib->hbacmd_size = sizeof(*rst);
  761. return HBA_IU_TYPE_SATA_REQ;
  762. }
  763. static void aac_tmf_callback(void *context, struct fib *fibptr)
  764. {
  765. struct aac_hba_resp *err =
  766. &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
  767. struct aac_hba_map_info *info = context;
  768. int res;
  769. switch (err->service_response) {
  770. case HBA_RESP_SVCRES_TMF_REJECTED:
  771. res = -1;
  772. break;
  773. case HBA_RESP_SVCRES_TMF_LUN_INVALID:
  774. res = 0;
  775. break;
  776. case HBA_RESP_SVCRES_TMF_COMPLETE:
  777. case HBA_RESP_SVCRES_TMF_SUCCEEDED:
  778. res = 0;
  779. break;
  780. default:
  781. res = -2;
  782. break;
  783. }
  784. aac_fib_complete(fibptr);
  785. info->reset_state = res;
  786. }
  787. /*
  788. * aac_eh_dev_reset - Device reset command handling
  789. * @scsi_cmd: SCSI command block causing the reset
  790. *
  791. */
  792. static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
  793. {
  794. struct scsi_device * dev = cmd->device;
  795. struct Scsi_Host * host = dev->host;
  796. struct aac_dev * aac = (struct aac_dev *)host->hostdata;
  797. struct aac_hba_map_info *info;
  798. int count;
  799. u32 bus, cid;
  800. struct fib *fib;
  801. int ret = FAILED;
  802. int status;
  803. u8 command;
  804. bus = aac_logical_to_phys(scmd_channel(cmd));
  805. cid = scmd_id(cmd);
  806. if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
  807. return FAILED;
  808. info = &aac->hba_map[bus][cid];
  809. if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
  810. !(info->reset_state > 0)))
  811. return FAILED;
  812. pr_err("%s: Host device reset request. SCSI hang ?\n",
  813. AAC_DRIVERNAME);
  814. fib = aac_fib_alloc(aac);
  815. if (!fib)
  816. return ret;
  817. /* start a HBA_TMF_LUN_RESET TMF request */
  818. command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun);
  819. info->reset_state = 1;
  820. status = aac_hba_send(command, fib,
  821. (fib_callback) aac_tmf_callback,
  822. (void *) info);
  823. if (status != -EINPROGRESS) {
  824. info->reset_state = 0;
  825. aac_fib_complete(fib);
  826. aac_fib_free(fib);
  827. return ret;
  828. }
  829. /* Wait up to 15 seconds for completion */
  830. for (count = 0; count < 15; ++count) {
  831. if (info->reset_state == 0) {
  832. ret = info->reset_state == 0 ? SUCCESS : FAILED;
  833. break;
  834. }
  835. msleep(1000);
  836. }
  837. return ret;
  838. }
/*
 *	aac_eh_target_reset - Target reset command handling
 *	@cmd:	SCSI command block causing the reset
 *
 *	Escalation step after a LUN reset: sends a hard-reset request for
 *	the target's nexus and waits up to 15 s for the firmware to answer.
 *	Returns SUCCESS or FAILED.
 */
static int aac_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	struct aac_hba_map_info *info;
	int count;
	u32 bus, cid;
	int ret = FAILED;
	struct fib *fib;
	int status;
	u8 command;

	bus = aac_logical_to_phys(scmd_channel(cmd));
	cid = scmd_id(cmd);

	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
		return FAILED;

	info = &aac->hba_map[bus][cid];

	/* Only native raw devices are resettable here; skip if a reset is
	 * already in flight (reset_state > 0). */
	if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
	      !(info->reset_state > 0)))
		return FAILED;

	pr_err("%s: Host target reset request. SCSI hang ?\n",
	       AAC_DRIVERNAME);

	fib = aac_fib_alloc(aac);
	if (!fib)
		return ret;

	/* already tried, start a hard reset now */
	command = aac_eh_tmf_hard_reset_fib(info, fib);

	/* reset_state 2 marks a hard reset in progress; aac_tmf_callback
	 * drops it to 0 on success or negative on failure. */
	info->reset_state = 2;

	status = aac_hba_send(command, fib,
			      (fib_callback) aac_tmf_callback,
			      (void *) info);
	if (status != -EINPROGRESS) {
		info->reset_state = 0;
		aac_fib_complete(fib);
		aac_fib_free(fib);
		return ret;
	}

	/* Wait up to 15 seconds for completion */
	for (count = 0; count < 15; ++count) {
		if (info->reset_state <= 0) {
			ret = info->reset_state == 0 ? SUCCESS : FAILED;
			break;
		}
		msleep(1000);
	}

	return ret;
}
  891. /*
  892. * aac_eh_bus_reset - Bus reset command handling
  893. * @scsi_cmd: SCSI command block causing the reset
  894. *
  895. */
  896. static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
  897. {
  898. struct scsi_device * dev = cmd->device;
  899. struct Scsi_Host * host = dev->host;
  900. struct aac_dev * aac = (struct aac_dev *)host->hostdata;
  901. int count;
  902. u32 cmd_bus;
  903. int status = 0;
  904. cmd_bus = aac_logical_to_phys(scmd_channel(cmd));
  905. /* Mark the assoc. FIB to not complete, eh handler does this */
  906. for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
  907. struct fib *fib = &aac->fibs[count];
  908. if (fib->hw_fib_va->header.XferState &&
  909. (fib->flags & FIB_CONTEXT_FLAG) &&
  910. (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) {
  911. struct aac_hba_map_info *info;
  912. u32 bus, cid;
  913. cmd = (struct scsi_cmnd *)fib->callback_data;
  914. bus = aac_logical_to_phys(scmd_channel(cmd));
  915. if (bus != cmd_bus)
  916. continue;
  917. cid = scmd_id(cmd);
  918. info = &aac->hba_map[bus][cid];
  919. if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
  920. info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
  921. fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
  922. aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER;
  923. }
  924. }
  925. }
  926. pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
  927. /*
  928. * Check the health of the controller
  929. */
  930. status = aac_adapter_check_health(aac);
  931. if (status)
  932. dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
  933. count = get_num_of_incomplete_fibs(aac);
  934. return (count == 0) ? SUCCESS : FAILED;
  935. }
/*
 *	aac_eh_host_reset - Host reset command handling
 *	@cmd:	SCSI command block causing the reset
 *
 *	Last-resort EH step: performs a register-level (blind) IOP reset if
 *	the firmware advertises support and module policy allows it, then
 *	clears per-device reset bookkeeping.  Returns SUCCESS or FAILED.
 */
static int aac_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	int ret = FAILED;
	__le32 supported_options2 = 0;
	bool is_mu_reset;
	bool is_ignore_reset;
	bool is_doorbell_reset;

	/*
	 * Check if reset is supported by the firmware
	 */
	supported_options2 = aac->supplement_adapter_info.supported_options2;
	is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
	is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
	is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
	/*
	 * This adapter needs a blind reset, only do so for
	 * Adapters that support a register, instead of a commanded,
	 * reset.  aac_check_reset is the module-level policy knob;
	 * -1 means "reset unless the adapter asks to be ignored".
	 */
	if ((is_mu_reset || is_doorbell_reset)
	 && aac_check_reset
	 && (aac_check_reset != -1 || !is_ignore_reset)) {
		/* Bypass wait for command quiesce */
		if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0)
			ret = SUCCESS;
	}
	/*
	 * Reset EH state: a successful host reset invalidates any
	 * in-flight per-device TMF bookkeeping.
	 */
	if (ret == SUCCESS) {
		int bus, cid;
		struct aac_hba_map_info *info;

		for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
			for (cid = 0; cid < AAC_MAX_TARGETS; cid++) {
				info = &aac->hba_map[bus][cid];
				if (info->devtype == AAC_DEVTYPE_NATIVE_RAW)
					info->reset_state = 0;
			}
		}
	}
	return ret;
}
  986. /**
  987. * aac_cfg_open - open a configuration file
  988. * @inode: inode being opened
  989. * @file: file handle attached
  990. *
  991. * Called when the configuration device is opened. Does the needed
  992. * set up on the handle and then returns
  993. *
  994. * Bugs: This needs extending to check a given adapter is present
  995. * so we can support hot plugging, and to ref count adapters.
  996. */
  997. static int aac_cfg_open(struct inode *inode, struct file *file)
  998. {
  999. struct aac_dev *aac;
  1000. unsigned minor_number = iminor(inode);
  1001. int err = -ENODEV;
  1002. mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */
  1003. list_for_each_entry(aac, &aac_devices, entry) {
  1004. if (aac->id == minor_number) {
  1005. file->private_data = aac;
  1006. err = 0;
  1007. break;
  1008. }
  1009. }
  1010. mutex_unlock(&aac_mutex);
  1011. return err;
  1012. }
  1013. /**
  1014. * aac_cfg_ioctl - AAC configuration request
  1015. * @file: file handle
  1016. * @cmd: ioctl command code
  1017. * @arg: argument
  1018. *
  1019. * Handles a configuration ioctl. Currently this involves wrapping it
  1020. * up and feeding it into the nasty windowsalike glue layer.
  1021. *
  1022. * Bugs: Needs locking against parallel ioctls lower down
  1023. * Bugs: Needs to handle hot plugging
  1024. */
  1025. static long aac_cfg_ioctl(struct file *file,
  1026. unsigned int cmd, unsigned long arg)
  1027. {
  1028. struct aac_dev *aac = (struct aac_dev *)file->private_data;
  1029. if (!capable(CAP_SYS_RAWIO))
  1030. return -EPERM;
  1031. return aac_do_ioctl(aac, cmd, (void __user *)arg);
  1032. }
  1033. static ssize_t aac_show_model(struct device *device,
  1034. struct device_attribute *attr, char *buf)
  1035. {
  1036. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1037. int len;
  1038. if (dev->supplement_adapter_info.adapter_type_text[0]) {
  1039. char *cp = dev->supplement_adapter_info.adapter_type_text;
  1040. while (*cp && *cp != ' ')
  1041. ++cp;
  1042. while (*cp == ' ')
  1043. ++cp;
  1044. len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
  1045. } else
  1046. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1047. aac_drivers[dev->cardtype].model);
  1048. return len;
  1049. }
  1050. static ssize_t aac_show_vendor(struct device *device,
  1051. struct device_attribute *attr, char *buf)
  1052. {
  1053. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1054. struct aac_supplement_adapter_info *sup_adap_info;
  1055. int len;
  1056. sup_adap_info = &dev->supplement_adapter_info;
  1057. if (sup_adap_info->adapter_type_text[0]) {
  1058. char *cp = sup_adap_info->adapter_type_text;
  1059. while (*cp && *cp != ' ')
  1060. ++cp;
  1061. len = snprintf(buf, PAGE_SIZE, "%.*s\n",
  1062. (int)(cp - (char *)sup_adap_info->adapter_type_text),
  1063. sup_adap_info->adapter_type_text);
  1064. } else
  1065. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1066. aac_drivers[dev->cardtype].vname);
  1067. return len;
  1068. }
  1069. static ssize_t aac_show_flags(struct device *cdev,
  1070. struct device_attribute *attr, char *buf)
  1071. {
  1072. int len = 0;
  1073. struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;
  1074. if (nblank(dprintk(x)))
  1075. len = snprintf(buf, PAGE_SIZE, "dprintk\n");
  1076. #ifdef AAC_DETAILED_STATUS_INFO
  1077. len += scnprintf(buf + len, PAGE_SIZE - len,
  1078. "AAC_DETAILED_STATUS_INFO\n");
  1079. #endif
  1080. if (dev->raw_io_interface && dev->raw_io_64)
  1081. len += scnprintf(buf + len, PAGE_SIZE - len,
  1082. "SAI_READ_CAPACITY_16\n");
  1083. if (dev->jbod)
  1084. len += scnprintf(buf + len, PAGE_SIZE - len,
  1085. "SUPPORTED_JBOD\n");
  1086. if (dev->supplement_adapter_info.supported_options2 &
  1087. AAC_OPTION_POWER_MANAGEMENT)
  1088. len += scnprintf(buf + len, PAGE_SIZE - len,
  1089. "SUPPORTED_POWER_MANAGEMENT\n");
  1090. if (dev->msi)
  1091. len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
  1092. return len;
  1093. }
  1094. static ssize_t aac_show_kernel_version(struct device *device,
  1095. struct device_attribute *attr,
  1096. char *buf)
  1097. {
  1098. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1099. int len, tmp;
  1100. tmp = le32_to_cpu(dev->adapter_info.kernelrev);
  1101. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1102. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1103. le32_to_cpu(dev->adapter_info.kernelbuild));
  1104. return len;
  1105. }
  1106. static ssize_t aac_show_monitor_version(struct device *device,
  1107. struct device_attribute *attr,
  1108. char *buf)
  1109. {
  1110. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1111. int len, tmp;
  1112. tmp = le32_to_cpu(dev->adapter_info.monitorrev);
  1113. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1114. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1115. le32_to_cpu(dev->adapter_info.monitorbuild));
  1116. return len;
  1117. }
  1118. static ssize_t aac_show_bios_version(struct device *device,
  1119. struct device_attribute *attr,
  1120. char *buf)
  1121. {
  1122. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1123. int len, tmp;
  1124. tmp = le32_to_cpu(dev->adapter_info.biosrev);
  1125. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1126. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1127. le32_to_cpu(dev->adapter_info.biosbuild));
  1128. return len;
  1129. }
/* sysfs "driver_version": the compile-time driver version string. */
static ssize_t aac_show_driver_version(struct device *device,
				       struct device_attribute *attr,
				       char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
}
/* sysfs "serial_number": the 24-bit adapter serial; when that short form
 * matches the tail of the manufacturing PCBA serial, report the full
 * manufacturing serial instead. */
static ssize_t aac_show_serial_number(struct device *device,
				      struct device_attribute *attr, char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len = 0;

	/* 0xBAD0 marks an invalid/unset serial in adapter_info. */
	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
		len = snprintf(buf, 16, "%06X\n",
			       le32_to_cpu(dev->adapter_info.serial[0]));
	/* Compare the short serial (minus its newline, hence len-1) against
	 * the last len bytes of the manufacturing PCBA serial. */
	if (len &&
	    !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[
			sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len],
		    buf, len-1))
		len = snprintf(buf, 16, "%.*s\n",
			       (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no),
			       dev->supplement_adapter_info.mfg_pcba_serial_no);

	return min(len, 16);
}
/* sysfs "max_channel": highest channel number exposed by this host. */
static ssize_t aac_show_max_channel(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			class_to_shost(device)->max_channel);
}
/* sysfs "max_id": highest target id exposed by this host. */
static ssize_t aac_show_max_id(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			class_to_shost(device)->max_id);
}
  1165. static ssize_t aac_store_reset_adapter(struct device *device,
  1166. struct device_attribute *attr,
  1167. const char *buf, size_t count)
  1168. {
  1169. int retval = -EACCES;
  1170. if (!capable(CAP_SYS_ADMIN))
  1171. return retval;
  1172. retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
  1173. buf[0] == '!', IOP_HWSOFT_RESET);
  1174. if (retval >= 0)
  1175. retval = count;
  1176. return retval;
  1177. }
  1178. static ssize_t aac_show_reset_adapter(struct device *device,
  1179. struct device_attribute *attr,
  1180. char *buf)
  1181. {
  1182. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1183. int len, tmp;
  1184. tmp = aac_adapter_check_health(dev);
  1185. if ((tmp == 0) && dev->in_reset)
  1186. tmp = -EBUSY;
  1187. len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
  1188. return len;
  1189. }
/* Host-level sysfs attributes: adapter identification, firmware component
 * versions, capability flags, and the reset hook. */
static struct device_attribute aac_model = {
	.attr = {
		.name = "model",
		.mode = S_IRUGO,
	},
	.show = aac_show_model,
};
static struct device_attribute aac_vendor = {
	.attr = {
		.name = "vendor",
		.mode = S_IRUGO,
	},
	.show = aac_show_vendor,
};
static struct device_attribute aac_flags = {
	.attr = {
		.name = "flags",
		.mode = S_IRUGO,
	},
	.show = aac_show_flags,
};
static struct device_attribute aac_kernel_version = {
	.attr = {
		.name = "hba_kernel_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_kernel_version,
};
static struct device_attribute aac_monitor_version = {
	.attr = {
		.name = "hba_monitor_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_monitor_version,
};
static struct device_attribute aac_bios_version = {
	.attr = {
		.name = "hba_bios_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_bios_version,
};
static struct device_attribute aac_lld_version = {
	.attr = {
		.name = "driver_version",
		.mode = 0444,
	},
	.show = aac_show_driver_version,
};
static struct device_attribute aac_serial_number = {
	.attr = {
		.name = "serial_number",
		.mode = S_IRUGO,
	},
	.show = aac_show_serial_number,
};
static struct device_attribute aac_max_channel = {
	.attr = {
		.name = "max_channel",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_channel,
};
static struct device_attribute aac_max_id = {
	.attr = {
		.name = "max_id",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_id,
};
/* "reset_host" is read-write: write triggers a reset, read shows health. */
static struct device_attribute aac_reset = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR|S_IRUGO,
	},
	.store = aac_store_reset_adapter,
	.show = aac_show_reset_adapter,
};

static struct attribute *aac_host_attrs[] = {
	&aac_model.attr,
	&aac_vendor.attr,
	&aac_flags.attr,
	&aac_kernel_version.attr,
	&aac_monitor_version.attr,
	&aac_bios_version.attr,
	&aac_lld_version.attr,
	&aac_serial_number.attr,
	&aac_max_channel.attr,
	&aac_max_id.attr,
	&aac_reset.attr,
	NULL
};

ATTRIBUTE_GROUPS(aac_host);
/* Exported helper: reuse the sysfs serial_number formatter elsewhere
 * in the driver. */
ssize_t aac_get_serial_number(struct device *device, char *buf)
{
	return aac_show_serial_number(device, &aac_serial_number, buf);
}
/* File operations for the "aac" configuration character device; the
 * ioctl path is shared between native and compat callers. */
static const struct file_operations aac_cfg_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = aac_cfg_ioctl,
#endif
	.open		= aac_cfg_open,
	.llseek		= noop_llseek,
};
/* SCSI host template tying the midlayer entry points to this driver. */
static const struct scsi_host_template aac_driver_template = {
	.module				= THIS_MODULE,
	.name				= "AAC",
	.proc_name			= AAC_DRIVERNAME,
	.info				= aac_info,
	.ioctl				= aac_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl			= aac_ioctl,
#endif
	.queuecommand			= aac_queuecommand,
	.bios_param			= aac_biosparm,
	.shost_groups			= aac_host_groups,
	.slave_configure		= aac_slave_configure,
	.change_queue_depth		= aac_change_queue_depth,
	.sdev_groups			= aac_dev_groups,
	.eh_abort_handler		= aac_eh_abort,
	.eh_device_reset_handler	= aac_eh_dev_reset,
	.eh_target_reset_handler	= aac_eh_target_reset,
	.eh_bus_reset_handler		= aac_eh_bus_reset,
	.eh_host_reset_handler		= aac_eh_host_reset,
	.can_queue			= AAC_NUM_IO_FIB,
	.this_id			= MAXIMUM_NUM_CONTAINERS,
	.sg_tablesize			= 16,
	.max_sectors			= 128,
/* cmd_per_lun must not exceed the number of I/O fibs available. */
#if (AAC_NUM_IO_FIB > 256)
	.cmd_per_lun			= 256,
#else
	.cmd_per_lun			= AAC_NUM_IO_FIB,
#endif
	.emulated			= 1,
	.no_write_same			= 1,
	.cmd_size			= sizeof(struct aac_cmd_priv),
};
  1329. static void __aac_shutdown(struct aac_dev * aac)
  1330. {
  1331. int i;
  1332. mutex_lock(&aac->ioctl_mutex);
  1333. aac->adapter_shutdown = 1;
  1334. mutex_unlock(&aac->ioctl_mutex);
  1335. if (aac->aif_thread) {
  1336. int i;
  1337. /* Clear out events first */
  1338. for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
  1339. struct fib *fib = &aac->fibs[i];
  1340. if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
  1341. (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
  1342. complete(&fib->event_wait);
  1343. }
  1344. kthread_stop(aac->thread);
  1345. aac->thread = NULL;
  1346. }
  1347. aac_send_shutdown(aac);
  1348. aac_adapter_disable_int(aac);
  1349. if (aac_is_src(aac)) {
  1350. if (aac->max_msix > 1) {
  1351. for (i = 0; i < aac->max_msix; i++) {
  1352. free_irq(pci_irq_vector(aac->pdev, i),
  1353. &(aac->aac_msix[i]));
  1354. }
  1355. } else {
  1356. free_irq(aac->pdev->irq,
  1357. &(aac->aac_msix[0]));
  1358. }
  1359. } else {
  1360. free_irq(aac->pdev->irq, aac);
  1361. }
  1362. if (aac->msi)
  1363. pci_disable_msi(aac->pdev);
  1364. else if (aac->max_msix > 1)
  1365. pci_disable_msix(aac->pdev);
  1366. }
/* Register the "aac" control character device with a dynamic major.
 * NOTE(review): a registration failure is only logged, not propagated —
 * aac_cfg_major stays negative and later device_create calls would be
 * skipped; confirm callers tolerate this. */
static void aac_init_char(void)
{
	aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
	if (aac_cfg_major < 0) {
		pr_err("aacraid: unable to register \"aac\" device.\n");
	}
}
/* Re-kick AIF event processing on SRC-class adapters. */
void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
{
	/*
	 * Firmware may send a AIF messages very early and the Driver may have
	 * ignored as it is not fully ready to process the messages. Send
	 * AIF to firmware so that if there are any unprocessed events they
	 * can be processed now.
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
		aac_intr_normal(aac, 0, 2, 0, NULL);
}
/*
 * aac_probe_one - PCI probe entry point for one aacraid controller.
 * @pdev: PCI device being probed
 * @id:   matching entry from aac_pci_tbl; driver_data indexes aac_drivers[]
 *
 * Enables the device, configures DMA masks per card quirks, allocates the
 * Scsi_Host and fib pool, runs the card-specific init routine, starts the
 * command thread, and registers the host with the SCSI midlayer.  Errors
 * unwind through the goto ladder at the bottom in reverse order of setup.
 *
 * Returns 0 on success or a negative errno.
 */
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned index = id->driver_data;
	struct Scsi_Host *shost;
	struct aac_dev *aac;
	struct list_head *insert = &aac_devices;	/* insertion point keeping aac_devices id-sorted */
	int error;
	int unique_id = 0;
	u64 dmamask;
	int mask_bits = 0;
	extern int aac_sync_mode;

	/*
	 * Only series 7 needs freset.
	 */
	if (pdev->device == PMC_DEVICE_S7)
		pdev->needs_freset = 1;

	/* Find the lowest free id and where to insert to keep the list sorted. */
	list_for_each_entry(aac, &aac_devices, entry) {
		if (aac->id > unique_id)
			break;
		insert = &aac->entry;
		unique_id++;
	}

	/* ASPM power states interfere with this hardware; keep the link up. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	error = pci_enable_device(pdev);
	if (error)
		goto out;

	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
		error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (error) {
			dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
			goto out_disable_pdev;
		}
	}

	/*
	 * If the quirk31 bit is set, the adapter needs adapter
	 * to driver communication memory to be allocated below 2gig
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
		dmamask = DMA_BIT_MASK(31);
		mask_bits = 31;
	} else {
		dmamask = DMA_BIT_MASK(32);
		mask_bits = 32;
	}

	error = dma_set_coherent_mask(&pdev->dev, dmamask);
	if (error) {
		dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
				, mask_bits);
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
	if (!shost) {
		error = -ENOMEM;
		goto out_disable_pdev;
	}

	shost->irq = pdev->irq;
	shost->unique_id = unique_id;
	shost->max_cmd_len = 16;

	/* Re-register the chardev if the last remove() tore it down. */
	if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
		aac_init_char();

	aac = (struct aac_dev *)shost->hostdata;
	aac->base_start = pci_resource_start(pdev, 0);
	aac->scsi_host_ptr = shost;
	aac->pdev = pdev;
	aac->name = aac_driver_template.name;
	aac->id = shost->unique_id;
	aac->cardtype = index;
	INIT_LIST_HEAD(&aac->entry);

	if (aac_reset_devices || reset_devices)
		aac->init_reset = true;

	/* One fib per queued command plus the management fibs. */
	aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
			    sizeof(struct fib),
			    GFP_KERNEL);
	if (!aac->fibs) {
		error = -ENOMEM;
		goto out_free_host;
	}

	spin_lock_init(&aac->fib_lock);

	mutex_init(&aac->ioctl_mutex);
	mutex_init(&aac->scan_mutex);

	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
	INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
				aac_src_reinit_aif_worker);
	/*
	 * Map in the registers from the adapter.
	 */
	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
	if ((*aac_drivers[index].init)(aac)) {
		error = -ENODEV;
		goto out_unmap;
	}

	if (aac->sync_mode) {
		if (aac_sync_mode)
			printk(KERN_INFO "%s%d: Sync. mode enforced "
				"by driver parameter. This will cause "
				"a significant performance decrease!\n",
				aac->name,
				aac->id);
		else
			printk(KERN_INFO "%s%d: Async. mode not supported "
				"by current driver, sync. mode enforced."
				"\nPlease update driver to get full performance.\n",
				aac->name,
				aac->id);
	}

	/*
	 * Start any kernel threads needed
	 */
	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
	if (IS_ERR(aac->thread)) {
		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
		error = PTR_ERR(aac->thread);
		aac->thread = NULL;
		goto out_deinit;
	}

	aac->maximum_num_channels = aac_drivers[index].channels;
	error = aac_get_adapter_info(aac);
	if (error < 0)
		goto out_deinit;

	/*
	 * Lets override negotiations and drop the maximum SG limit to 34
	 */
	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
			(shost->sg_tablesize > 34)) {
		shost->sg_tablesize = 34;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}

	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
			(shost->sg_tablesize > 17)) {
		shost->sg_tablesize = 17;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}

	if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
		shost->max_segment_size = shost->max_sectors << 9;
	else
		shost->max_segment_size = 65536;

	/*
	 * Firmware printf works only with older firmware.
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
		aac->printf_enabled = 1;
	else
		aac->printf_enabled = 0;

	/*
	 * max channel will be the physical channels plus 1 virtual channel
	 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
	 * physical channels are address by their actual physical number+1
	 */
	if (aac->nondasd_support || expose_physicals || aac->jbod)
		shost->max_channel = aac->maximum_num_channels;
	else
		shost->max_channel = 0;

	aac_get_config_status(aac, 0);
	aac_get_containers(aac);
	/* Publish the adapter on the global list at the sorted position. */
	list_add(&aac->entry, insert);

	shost->max_id = aac->maximum_num_containers;
	if (shost->max_id < aac->maximum_num_physicals)
		shost->max_id = aac->maximum_num_physicals;
	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
		shost->max_id = MAXIMUM_NUM_CONTAINERS;
	else
		shost->this_id = shost->max_id;

	/* Flush any AIF events queued by firmware before we were ready. */
	if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
		aac_intr_normal(aac, 0, 2, 0, NULL);

	/*
	 * dmb - we may need to move the setting of these parms somewhere else once
	 * we get a fib that can report the actual numbers
	 */
	shost->max_lun = AAC_MAX_LUN;

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_deinit;

	aac_scan_host(aac);

	pci_save_state(pdev);
	return 0;

	/* Error unwind: each label undoes the setup done above it. */
 out_deinit:
	__aac_shutdown(aac);
 out_unmap:
	aac_fib_map_free(aac);
	if (aac->comm_addr)
		dma_free_coherent(&aac->pdev->dev, aac->comm_size,
				  aac->comm_addr, aac->comm_phys);
	kfree(aac->queues);
	aac_adapter_ioremap(aac, 0);
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
 out_free_host:
	scsi_host_put(shost);
 out_disable_pdev:
	pci_disable_device(pdev);
 out:
	return error;
}
/*
 * aac_release_resources - mask adapter interrupts and free its IRQs.
 * @aac: adapter instance
 *
 * Counterpart of aac_acquire_resources(); used on suspend and during
 * PCI error handling.  Interrupts are disabled at the hardware before
 * the IRQ lines are released.
 */
static void aac_release_resources(struct aac_dev *aac)
{
	aac_adapter_disable_int(aac);
	aac_free_irq(aac);
}
/*
 * aac_acquire_resources - re-establish interrupts/comms after resume or EEH.
 * @dev: adapter instance
 *
 * Waits for firmware to report KERNEL_UP_AND_RUNNING, reprograms the
 * interrupt mode (which may have changed across the event), re-requests
 * IRQs, reassigns MSI-X vectors to fibs, and restarts the adapter when
 * not in sync mode.
 *
 * Returns 0 on success, -1 if IRQ acquisition fails.
 */
static int aac_acquire_resources(struct aac_dev *dev)
{
	unsigned long status;
	/*
	 * First clear out all interrupts.  Then enable the one's that we
	 * can handle.
	 */
	/* 0xffffffff means the device is not responding on MMIO reads yet. */
	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
		|| status == 0xffffffff)
		msleep(20);

	aac_adapter_disable_int(dev);
	aac_adapter_enable_int(dev);

	if (aac_is_src(dev))
		aac_define_int_mode(dev);

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	/*
	 * NOTE(review): enable_int is called a second time here, after the
	 * IRQs are registered — presumably deliberate to re-arm anything
	 * masked by aac_define_int_mode(); confirm before "simplifying".
	 */
	aac_adapter_enable_int(dev);

	/*max msix may change after EEH
	 * Re-assign vectors to fibs
	 */
	aac_fib_vector_assign(dev);

	if (!dev->sync_mode) {
		/* After EEH recovery or suspend resume, max_msix count
		 * may change, therefore updating in init as well.
		 */
		dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
		aac_adapter_start(dev);
	}
	return 0;

error_iounmap:
	return -1;
}
/*
 * aac_suspend - PM suspend callback.
 * @dev: generic device embedded in the PCI device
 *
 * Blocks the SCSI host, cancels deferred rescan work, asks firmware to
 * shut down, then masks and frees the adapter interrupts.  Always
 * returns 0.
 */
static int __maybe_unused aac_suspend(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_host_block(shost);
	aac_cancel_rescan_worker(aac);
	aac_send_shutdown(aac);

	aac_release_resources(aac);

	return 0;
}
/*
 * aac_resume - PM resume callback; undoes aac_suspend().
 * @dev: generic device embedded in the PCI device
 *
 * Reacquires IRQs/comms, clears the shutdown flag set on suspend so
 * ioctls work again, and unblocks the SCSI host.
 *
 * Returns 0 on success, -ENODEV if the adapter could not be brought up.
 */
static int __maybe_unused aac_resume(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	if (aac_acquire_resources(aac))
		goto fail_device;
	/*
	 * reset this flag to unblock ioctl() as it was set at
	 * aac_send_shutdown() to block ioctls from upperlayer
	 */
	aac->adapter_shutdown = 0;
	scsi_host_unblock(shost, SDEV_RUNNING);

	return 0;

fail_device:
	printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
	/*
	 * NOTE(review): dropping a host reference here while the host is
	 * still registered looks suspicious — verify this put is matched
	 * by a get somewhere, or whether it risks a premature free.
	 */
	scsi_host_put(shost);
	return -ENODEV;
}
  1648. static void aac_shutdown(struct pci_dev *dev)
  1649. {
  1650. struct Scsi_Host *shost = pci_get_drvdata(dev);
  1651. scsi_host_block(shost);
  1652. __aac_shutdown((struct aac_dev *)shost->hostdata);
  1653. }
/*
 * aac_remove_one - PCI remove callback; full teardown of one adapter.
 * @pdev: PCI device being removed
 *
 * Unwinds aac_probe_one() in reverse: remove the host from the midlayer,
 * shut the controller down, free fib maps / comm area / queues / fibs,
 * unmap registers, drop the host reference and disable the device.  When
 * the last adapter is gone, the "aac" chardev is unregistered and marked
 * for re-registration by a future probe.
 */
static void aac_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	aac_cancel_rescan_worker(aac);
	scsi_remove_host(shost);

	__aac_shutdown(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	kfree(aac->queues);

	aac_adapter_ioremap(aac, 0);

	kfree(aac->fibs);
	kfree(aac->fsa_dev);

	list_del(&aac->entry);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	if (list_empty(&aac_devices)) {
		unregister_chrdev(aac_cfg_major, "aac");
		/* Sentinel so the next probe re-runs aac_init_char(). */
		aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
	}
}
  1676. static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
  1677. pci_channel_state_t error)
  1678. {
  1679. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1680. struct aac_dev *aac = shost_priv(shost);
  1681. dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);
  1682. switch (error) {
  1683. case pci_channel_io_normal:
  1684. return PCI_ERS_RESULT_CAN_RECOVER;
  1685. case pci_channel_io_frozen:
  1686. aac->handle_pci_error = 1;
  1687. scsi_host_block(shost);
  1688. aac_cancel_rescan_worker(aac);
  1689. scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
  1690. aac_release_resources(aac);
  1691. aac_adapter_ioremap(aac, 0);
  1692. return PCI_ERS_RESULT_NEED_RESET;
  1693. case pci_channel_io_perm_failure:
  1694. aac->handle_pci_error = 1;
  1695. scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
  1696. return PCI_ERS_RESULT_DISCONNECT;
  1697. }
  1698. return PCI_ERS_RESULT_NEED_RESET;
  1699. }
/*
 * aac_pci_mmio_enabled - AER callback once MMIO access is restored.
 *
 * Nothing useful can be probed at this stage; just log and request a
 * slot reset.
 */
static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * aac_pci_slot_reset - AER callback after the slot has been reset.
 * @pdev: PCI device that was reset
 *
 * Restores config space and re-enables the device so aac_pci_resume()
 * can rebuild driver state.
 */
static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		dev_warn(&pdev->dev,
			"aacraid: failed to enable slave\n");
		goto fail_device;
	}

	pci_set_master(pdev);

	/*
	 * NOTE(review): enabling the device twice (pci_enable_device then
	 * pci_enable_device_mem) looks redundant — the second call should
	 * be a no-op on an already-enabled device; confirm before removing.
	 */
	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
		goto fail_device;
	}

	return PCI_ERS_RESULT_RECOVERED;

fail_device:
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
/*
 * aac_pci_resume - final AER callback; bring the adapter back to life.
 * @pdev: recovered PCI device
 *
 * Re-maps the BARs (falling back to the minimal producer-mode footprint
 * if the full mapping fails), reacquires IRQs/comms, clears the error
 * and shutdown flags, unblocks the host and rescans for devices.
 */
static void aac_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);

	if (aac_adapter_ioremap(aac, aac->base_size)) {

		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
		/* remap failed, go back ... */
		aac->comm_interface = AAC_COMM_PRODUCER;
		if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
			dev_warn(&pdev->dev,
				"aacraid: unable to map adapter.\n");

			return;
		}
	}

	/*
	 * NOTE(review): fixed 10 s delay — presumably giving firmware time
	 * to come up after the reset; no visible handshake justifies the
	 * exact value.
	 */
	msleep(10000);

	aac_acquire_resources(aac);

	/*
	 * reset this flag to unblock ioctl() as it was set
	 * at aac_send_shutdown() to block ioctls from upperlayer
	 */
	aac->adapter_shutdown = 0;
	aac->handle_pci_error = 0;

	scsi_host_unblock(shost, SDEV_RUNNING);
	aac_scan_host(aac);
	pci_save_state(pdev);

	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
/* PCI AER (error-recovery) callbacks for this driver. */
static struct pci_error_handlers aac_pci_err_handler = {
	.error_detected		= aac_pci_error_detected,
	.mmio_enabled		= aac_pci_mmio_enabled,
	.slot_reset		= aac_pci_slot_reset,
	.resume			= aac_pci_resume,
};
/* System suspend/resume operations (no hibernate-specific handling). */
static SIMPLE_DEV_PM_OPS(aac_pm_ops, aac_suspend, aac_resume);

/* PCI driver registration table tying all entry points together. */
static struct pci_driver aac_pci_driver = {
	.name		= AAC_DRIVERNAME,
	.id_table	= aac_pci_tbl,
	.probe		= aac_probe_one,
	.remove		= aac_remove_one,
	.driver.pm      = &aac_pm_ops,
	.shutdown	= aac_shutdown,
	.err_handler    = &aac_pci_err_handler,
};
  1767. static int __init aac_init(void)
  1768. {
  1769. int error;
  1770. printk(KERN_INFO "Adaptec %s driver %s\n",
  1771. AAC_DRIVERNAME, aac_driver_version);
  1772. error = pci_register_driver(&aac_pci_driver);
  1773. if (error < 0)
  1774. return error;
  1775. aac_init_char();
  1776. return 0;
  1777. }
  1778. static void __exit aac_exit(void)
  1779. {
  1780. if (aac_cfg_major > -1)
  1781. unregister_chrdev(aac_cfg_major, "aac");
  1782. pci_unregister_driver(&aac_pci_driver);
  1783. }
/* Hook module load/unload to the init/exit routines above. */
module_init(aac_init);
module_exit(aac_exit);