// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * This driver supports the newer, SCSI-based firmware interface only.
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver, which has
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrs.h"

static struct raid_template *myrs_raid_template;

static struct myrs_devstate_name_entry {
	enum myrs_devstate state;
	char *name;
} myrs_devstate_name_list[] = {
	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
	{ MYRS_DEVICE_ONLINE, "Online" },
	{ MYRS_DEVICE_REBUILD, "Rebuild" },
	{ MYRS_DEVICE_MISSING, "Missing" },
	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
	{ MYRS_DEVICE_OFFLINE, "Offline" },
	{ MYRS_DEVICE_CRITICAL, "Critical" },
	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
	{ MYRS_DEVICE_STANDBY, "Standby" },
	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
};

static char *myrs_devstate_name(enum myrs_devstate state)
{
	struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}

static struct myrs_raid_level_name_entry {
	enum myrs_raid_level level;
	char *name;
} myrs_raid_level_name_list[] = {
	{ MYRS_RAID_LEVEL0, "RAID0" },
	{ MYRS_RAID_LEVEL1, "RAID1" },
	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
	{ MYRS_RAID_LEVEL6, "RAID6" },
	{ MYRS_RAID_JBOD, "JBOD" },
	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
	{ MYRS_RAID_SPAN, "Mylex SPAN" },
	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
	{ MYRS_RAID_PHYSICAL, "Physical device" },
};

static char *myrs_raid_level_name(enum myrs_raid_level level)
{
	struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
 */
static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
{
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 */
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);
	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);
	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;
	cs->next_cmd_mbox = next_mbox;
}
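
/*
 * A usage note on myrs_qcmd(): it only posts the mailbox, and callers
 * must hold cs->queue_lock around it, as myrs_exec_cmd() below does:
 *
 *	spin_lock_irqsave(&cs->queue_lock, flags);
 *	myrs_qcmd(cs, cmd_blk);
 *	spin_unlock_irqrestore(&cs->queue_lock, flags);
 *
 * The command mailboxes form a ring (next_cmd_mbox wraps from
 * last_cmd_mbox back to first_cmd_mbox). The doorbell (get_cmd_mbox)
 * is rung only when one of the two most recently posted mailboxes has
 * already been cleared (words[0] == 0), i.e. when the controller has
 * apparently caught up and may no longer be scanning the ring.
 */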

/*
 * myrs_exec_cmd - executes V2 Command and waits for completion.
 */
static void myrs_exec_cmd(struct myrs_hba *cs,
		struct myrs_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	unsigned long flags;

	cmd_blk->complete = &complete;
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	wait_for_completion(&complete);
}

/*
 * myrs_report_progress - prints progress message
 */
static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
		unsigned char *msg, unsigned long blocks,
		unsigned long size)
{
	shost_printk(KERN_INFO, cs->host,
		     "Logical Drive %d: %s in Progress: %d%% completed\n",
		     ldev_num, msg,
		     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
}
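
/*
 * The >> 7 above scales both operands down by 128 before the multiply,
 * which helps keep 100 * blocks within int range on large drives while
 * preserving the ratio; e.g. blocks = 0x400000 and size = 0x800000
 * still yields (100 * 0x8000) / 0x10000 = 50%.
 */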

/*
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 */
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	unsigned short ldev_present, ldev_critical, ldev_offline;

	ldev_present = cs->ctlr_info->ldev_present;
	ldev_critical = cs->ctlr_info->ldev_critical;
	ldev_offline = cs->ctlr_info->ldev_offline;

	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != ldev_present ||
		    cs->ctlr_info->ldev_critical != ldev_critical ||
		    cs->ctlr_info->ldev_offline != ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}
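
/*
 * The dcmd-based helpers that follow share the pattern used above:
 * map the result buffer for DMA, take dcmd_mutex (dcmd_blk and its
 * MYRS_DCMD_TAG tag are a single shared resource), fill in the
 * mailbox, run the command synchronously via myrs_exec_cmd(), then
 * unlock and unmap before acting on the returned status.
 */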

/*
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}

	return status;
}
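
/*
 * Note the snapshot-and-compare pattern above: the caller's ldev_info
 * is copied to ldev_info_orig before the IOCTL overwrites it, so state
 * transitions, new error counts, and the completion of a background
 * initialization can be detected and logged against the previous poll.
 */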

/*
 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 */
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_pdev_info *pdev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t pdev_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;

	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
					sizeof(struct myrs_pdev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.id = MYRS_DCMD_TAG;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = pdev_info_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
		channel, target, lun);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_dev_op - executes a "Device Operation" Command
 */
static unsigned char myrs_dev_op(struct myrs_hba *cs,
		enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
	mbox->dev_op.id = MYRS_DCMD_TAG;
	mbox->dev_op.control.dma_ctrl_to_host = true;
	mbox->dev_op.control.no_autosense = true;
	mbox->dev_op.ioctl_opcode = opcode;
	mbox->dev_op.opdev = opdev;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	return status;
}

/*
 * myrs_translate_pdev - translates a Physical Device Channel and
 * TargetID into a Logical Device.
 */
static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_devmap *devmap)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t devmap_addr;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	memset(devmap, 0x0, sizeof(struct myrs_devmap));
	devmap_addr = dma_map_single(&pdev->dev, devmap,
				     sizeof(struct myrs_devmap),
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, devmap_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	mbox = &cmd_blk->mbox;
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = devmap_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&pdev->dev, devmap_addr,
			 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_get_event - executes a Get Event Command
 */
static unsigned char myrs_get_event(struct myrs_hba *cs,
		unsigned int event_num, struct myrs_event *event_buf)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t event_addr;
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	event_addr = dma_map_single(&pdev->dev, event_buf,
				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, event_addr))
		return MYRS_STATUS_FAILED;

	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
	mbox->get_event.dma_size = sizeof(struct myrs_event);
	mbox->get_event.evnum_upper = event_num >> 16;
	mbox->get_event.ctlr_num = 0;
	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
	mbox->get_event.evnum_lower = event_num & 0xFFFF;
	sgl = &mbox->get_event.dma_addr;
	sgl->sge[0].sge_addr = event_addr;
	sgl->sge[0].sge_count = mbox->get_event.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	dma_unmap_single(&pdev->dev, event_addr,
			 sizeof(struct myrs_event), DMA_FROM_DEVICE);

	return status;
}

/*
 * myrs_get_fwstatus - executes a Get Health Status Command
 */
static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	myrs_reset_cmd(cmd_blk);
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_MCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.dma_size = sizeof(struct myrs_fwstat);
	mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
	sgl = &mbox->common.dma_addr;
	sgl->sge[0].sge_addr = cs->fwstat_addr;
	sgl->sge[0].sge_count = mbox->common.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;

	return status;
}

/*
 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
 */
static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
		enable_mbox_t enable_mbox_fn)
{
	void __iomem *base = cs->io_base;
	struct pci_dev *pdev = cs->pdev;
	union myrs_cmd_mbox *cmd_mbox;
	struct myrs_stat_mbox *stat_mbox;
	union myrs_cmd_mbox *mbox;
	dma_addr_t mbox_addr;
	unsigned char status = MYRS_STATUS_FAILED;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "DMA mask out of range\n");
			return false;
		}

	/* Temporary dma mapping, used only in the scope of this function */
	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
				  &mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, mbox_addr))
		return false;

	/* These are the base addresses for the command memory mailbox array */
	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
				      &cs->cmd_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map command mailbox\n");
		goto out_free;
	}
	cs->first_cmd_mbox = cmd_mbox;
	cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
	cs->last_cmd_mbox = cmd_mbox;
	cs->next_cmd_mbox = cs->first_cmd_mbox;
	cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
	cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
				       &cs->stat_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map status mailbox\n");
		goto out_free;
	}
	cs->first_stat_mbox = stat_mbox;
	stat_mbox += MYRS_MAX_STAT_MBOX - 1;
	cs->last_stat_mbox = stat_mbox;
	cs->next_stat_mbox = cs->first_stat_mbox;

	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
					    sizeof(struct myrs_fwstat),
					    &cs->fwstat_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
		dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
		cs->fwstat_buf = NULL;
		goto out_free;
	}

	cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
	if (!cs->ctlr_info)
		goto out_free;

	cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
	if (!cs->event_buf)
		goto out_free;

	/* Enable the Memory Mailbox Interface. */
	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	mbox->set_mbox.id = 1;
	mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
	mbox->set_mbox.control.no_autosense = true;
	mbox->set_mbox.first_cmd_mbox_size_kb =
		(MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
	mbox->set_mbox.first_stat_mbox_size_kb =
		(MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
	mbox->set_mbox.second_cmd_mbox_size_kb = 0;
	mbox->set_mbox.second_stat_mbox_size_kb = 0;
	mbox->set_mbox.sense_len = 0;
	mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
	mbox->set_mbox.fwstat_buf_size_kb = 1;
	mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
	mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
	mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
	status = enable_mbox_fn(base, mbox_addr);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
			  mbox, mbox_addr);
	if (status != MYRS_STATUS_SUCCESS)
		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
			status);
	return (status == MYRS_STATUS_SUCCESS);
}
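
/*
 * The SET_MEM_MBOX command above hands the controller the DMA
 * addresses of the command and status mailbox rings and of the
 * firmware health buffer. The ring sizes are passed in kilobytes
 * (hence the >> 10), matching the *_size_kb mailbox fields, and the
 * health buffer is advertised as a single kilobyte.
 */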

/*
 * myrs_get_config - reads the Configuration Information
 */
static int myrs_get_config(struct myrs_hba *cs)
{
	struct myrs_ctlr_info *info = cs->ctlr_info;
	struct Scsi_Host *shost = cs->host;
	unsigned char status;
	unsigned char model[20];
	unsigned char fw_version[12];
	int i, model_len;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&cs->cinfo_mutex);
	status = myrs_get_ctlr_info(cs);
	mutex_unlock(&cs->cinfo_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/* Initialize the Controller Model Name and Full Model Name fields. */
	model_len = sizeof(info->ctlr_name);
	if (model_len > sizeof(model)-1)
		model_len = sizeof(model)-1;
	memcpy(model, info->ctlr_name, model_len);
	model_len--;
	while (model[model_len] == ' ' || model[model_len] == '\0')
		model_len--;
	model[++model_len] = '\0';
	strcpy(cs->model_name, "DAC960 ");
	strcat(cs->model_name, model);

	/* Initialize the Controller Firmware Version field. */
	sprintf(fw_version, "%d.%02d-%02d",
		info->fw_major_version, info->fw_minor_version,
		info->fw_turn_number);
	if (info->fw_major_version == 6 &&
	    info->fw_minor_version == 0 &&
	    info->fw_turn_number < 1) {
		shost_printk(KERN_WARNING, shost,
			     "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			     "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			     "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			     fw_version);
		return -ENODEV;
	}

	/* Initialize the Controller Channels and Targets. */
	shost->max_channel = info->physchan_present + info->virtchan_present;
	shost->max_id = info->max_targets[0];
	for (i = 1; i < 16; i++) {
		if (!info->max_targets[i])
			continue;
		if (shost->max_id < info->max_targets[i])
			shost->max_id = info->max_targets[i];
	}

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	shost->can_queue = info->max_tcq - 3;
	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
	shost->max_sectors = info->max_transfer_size;
	shost->sg_tablesize = info->max_sge;
	if (shost->sg_tablesize > MYRS_SG_LIMIT)
		shost->sg_tablesize = MYRS_SG_LIMIT;

	shost_printk(KERN_INFO, shost,
		     "Configuring %s PCI RAID Controller\n", model);
	shost_printk(KERN_INFO, shost,
		     "  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		     fw_version, info->physchan_present, info->mem_size_mb);
	shost_printk(KERN_INFO, shost,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     shost->can_queue, shost->max_sectors);
	shost_printk(KERN_INFO, shost,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
	for (i = 0; i < info->physchan_max; i++) {
		if (!info->max_targets[i])
			continue;
		shost_printk(KERN_INFO, shost,
			     "  Device Channel %d: max %d devices\n",
			     i, info->max_targets[i]);
	}
	shost_printk(KERN_INFO, shost,
		     "  Physical: %d/%d channels, %d disks, %d devices\n",
		     info->physchan_present, info->physchan_max,
		     info->pdisk_present, info->pdev_present);
	shost_printk(KERN_INFO, shost,
		     "  Logical: %d/%d channels, %d disks\n",
		     info->virtchan_present, info->virtchan_max,
		     info->ldev_present);
	return 0;
}

/*
 * myrs_log_event - prints a Controller Event message
 */
static struct {
	int ev_code;
	unsigned char *ev_msg;
} myrs_ev_list[] = {
	/* Physical Device Events (0x0000 - 0x007F) */
	{ 0x0001, "P Online" },
	{ 0x0002, "P Standby" },
	{ 0x0005, "P Automatic Rebuild Started" },
	{ 0x0006, "P Manual Rebuild Started" },
	{ 0x0007, "P Rebuild Completed" },
	{ 0x0008, "P Rebuild Cancelled" },
	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
	{ 0x000C, "S Offline" },
	{ 0x000D, "P Found" },
	{ 0x000E, "P Removed" },
	{ 0x000F, "P Unconfigured" },
	{ 0x0010, "P Expand Capacity Started" },
	{ 0x0011, "P Expand Capacity Completed" },
	{ 0x0012, "P Expand Capacity Failed" },
	{ 0x0013, "P Command Timed Out" },
	{ 0x0014, "P Command Aborted" },
	{ 0x0015, "P Command Retried" },
	{ 0x0016, "P Parity Error" },
	{ 0x0017, "P Soft Error" },
	{ 0x0018, "P Miscellaneous Error" },
	{ 0x0019, "P Reset" },
	{ 0x001A, "P Active Spare Found" },
	{ 0x001B, "P Warm Spare Found" },
	{ 0x001C, "S Sense Data Received" },
	{ 0x001D, "P Initialization Started" },
	{ 0x001E, "P Initialization Completed" },
	{ 0x001F, "P Initialization Failed" },
	{ 0x0020, "P Initialization Cancelled" },
	{ 0x0021, "P Failed because Write Recovery Failed" },
	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
	{ 0x0023, "P Failed because of Double Check Condition" },
	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
	{ 0x0026, "P Failed because of Bad Tag from Device" },
	{ 0x0027, "P Failed because of Command Timeout" },
	{ 0x0028, "P Failed because of System Reset" },
	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
	{ 0x002A, "P Failed because Host Set Device to Failed State" },
	{ 0x002B, "P Failed because of Selection Timeout" },
	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
	{ 0x002D, "P Failed because Device Returned Unknown Status" },
	{ 0x002E, "P Failed because Device Not Ready" },
	{ 0x002F, "P Failed because Device Not Found at Startup" },
	{ 0x0030, "P Failed because COD Write Operation Failed" },
	{ 0x0031, "P Failed because BDT Write Operation Failed" },
	{ 0x0039, "P Missing at Startup" },
	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
	{ 0x003D, "P Standby Rebuild Started" },
	/* Logical Device Events (0x0080 - 0x00FF) */
	{ 0x0080, "M Consistency Check Started" },
	{ 0x0081, "M Consistency Check Completed" },
	{ 0x0082, "M Consistency Check Cancelled" },
	{ 0x0083, "M Consistency Check Completed With Errors" },
	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
	{ 0x0086, "L Offline" },
	{ 0x0087, "L Critical" },
	{ 0x0088, "L Online" },
	{ 0x0089, "M Automatic Rebuild Started" },
	{ 0x008A, "M Manual Rebuild Started" },
	{ 0x008B, "M Rebuild Completed" },
	{ 0x008C, "M Rebuild Cancelled" },
	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
	{ 0x0090, "M Initialization Started" },
	{ 0x0091, "M Initialization Completed" },
	{ 0x0092, "M Initialization Cancelled" },
	{ 0x0093, "M Initialization Failed" },
	{ 0x0094, "L Found" },
	{ 0x0095, "L Deleted" },
	{ 0x0096, "M Expand Capacity Started" },
	{ 0x0097, "M Expand Capacity Completed" },
	{ 0x0098, "M Expand Capacity Failed" },
	{ 0x0099, "L Bad Block Found" },
	{ 0x009A, "L Size Changed" },
	{ 0x009B, "L Type Changed" },
	{ 0x009C, "L Bad Data Block Found" },
	{ 0x009E, "L Read of Data Block in BDT" },
	{ 0x009F, "L Write Back Data for Disk Block Lost" },
	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
	{ 0x00A2, "L Standby Rebuild Started" },
	/* Fault Management Events (0x0100 - 0x017F) */
	{ 0x0140, "E Fan %d Failed" },
	{ 0x0141, "E Fan %d OK" },
	{ 0x0142, "E Fan %d Not Present" },
	{ 0x0143, "E Power Supply %d Failed" },
	{ 0x0144, "E Power Supply %d OK" },
	{ 0x0145, "E Power Supply %d Not Present" },
	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
	{ 0x0149, "E Temperature Sensor %d Not Present" },
	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
	/* Controller Events (0x0180 - 0x01FF) */
	{ 0x0181, "C Cache Write Back Error" },
	{ 0x0188, "C Battery Backup Unit Found" },
	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
	{ 0x0193, "C Installation Aborted" },
	{ 0x0195, "C Battery Backup Unit Physically Removed" },
	{ 0x0196, "C Memory Error During Warm Boot" },
	{ 0x019E, "C Memory Soft ECC Error Corrected" },
	{ 0x019F, "C Memory Hard ECC Error Corrected" },
	{ 0x01A2, "C Battery Backup Unit Failed" },
	{ 0x01AB, "C Mirror Race Recovery Failed" },
	{ 0x01AC, "C Mirror Race on Critical Drive" },
	/* Controller Internal Processor Events */
	{ 0x0380, "C Internal Controller Hung" },
	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
	{ 0, "" }
};
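
/*
 * Each event string above is encoded as "<type> <message>": byte 0 is
 * the event class consumed by myrs_log_event() below ('P' physical
 * device, 'L' logical drive, 'M' logical drive operation, 'S' sense
 * data, 'E' enclosure, 'C' controller), and the message text starts at
 * byte 2. Enclosure ('E') messages are printf formats that take the
 * unit number as their argument.
 */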

static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
{
	unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
	int ev_idx = 0, ev_code;
	unsigned char ev_type, *ev_msg;
	struct Scsi_Host *shost = cs->host;
	struct scsi_device *sdev;
	struct scsi_sense_hdr sshdr = {0};
	unsigned char sense_info[4];
	unsigned char cmd_specific[4];

	if (ev->ev_code == 0x1C) {
		if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
			memset(&sshdr, 0x0, sizeof(sshdr));
			memset(sense_info, 0x0, sizeof(sense_info));
			memset(cmd_specific, 0x0, sizeof(cmd_specific));
		} else {
			memcpy(sense_info, &ev->sense_data[3], 4);
			memcpy(cmd_specific, &ev->sense_data[7], 4);
		}
	}
	if (sshdr.sense_key == VENDOR_SPECIFIC &&
	    (sshdr.asc == 0x80 || sshdr.asc == 0x81))
		ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
	while (true) {
		ev_code = myrs_ev_list[ev_idx].ev_code;
		if (ev_code == ev->ev_code || ev_code == 0)
			break;
		ev_idx++;
	}
	ev_type = myrs_ev_list[ev_idx].ev_msg[0];
	ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
	if (ev_code == 0) {
		shost_printk(KERN_WARNING, shost,
			     "Unknown Controller Event Code %04X\n",
			     ev->ev_code);
		return;
	}
	switch (ev_type) {
	case 'P':
		sdev = scsi_device_lookup(shost, ev->channel,
					  ev->target, 0);
		if (!sdev) {
			shost_printk(KERN_INFO, shost,
				     "event %d: Physical Device %d:%d %s\n",
				     ev->ev_seq, ev->channel, ev->target,
				     ev_msg);
			break;
		}
		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
			    ev->ev_seq, ev_msg);
		if (sdev->hostdata &&
		    sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			switch (ev->ev_code) {
			case 0x0001:
			case 0x0007:
				pdev_info->dev_state = MYRS_DEVICE_ONLINE;
				break;
			case 0x0002:
				pdev_info->dev_state = MYRS_DEVICE_STANDBY;
				break;
			case 0x000C:
				pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
				break;
			case 0x000E:
				pdev_info->dev_state = MYRS_DEVICE_MISSING;
				break;
			case 0x000F:
				pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
				break;
			}
		}
		break;
	case 'L':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'M':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'S':
		if (sshdr.sense_key == NO_SENSE ||
		    (sshdr.sense_key == NOT_READY &&
		     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
					   sshdr.ascq == 0x02)))
			break;
		shost_printk(KERN_INFO, shost,
			     "event %d: Physical Device %d:%d %s\n",
			     ev->ev_seq, ev->channel, ev->target, ev_msg);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
			     ev->channel, ev->target,
			     sshdr.sense_key, sshdr.asc, sshdr.ascq);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
			     ev->channel, ev->target,
			     sense_info[0], sense_info[1],
			     sense_info[2], sense_info[3],
			     cmd_specific[0], cmd_specific[1],
			     cmd_specific[2], cmd_specific[3]);
		break;
	case 'E':
		if (cs->disable_enc_msg)
			break;
		sprintf(msg_buf, ev_msg, ev->lun);
		shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
			     ev->ev_seq, ev->target, msg_buf);
		break;
	case 'C':
		shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
			     ev->ev_seq, ev_msg);
		break;
	default:
		shost_printk(KERN_INFO, shost,
			     "event %d: Unknown Event Code %04X\n",
			     ev->ev_seq, ev->ev_code);
		break;
	}
}

/*
 * SCSI sysfs interface functions
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrs_devstate_name(ldev_info->dev_state);
		if (name)
			ret = snprintf(buf, 64, "%s\n", name);
		else
			ret = snprintf(buf, 64, "Invalid (%02X)\n",
				       ldev_info->dev_state);
	} else {
		struct myrs_pdev_info *pdev_info;
		const char *name;

		pdev_info = sdev->hostdata;
		name = myrs_devstate_name(pdev_info->dev_state);
		if (name)
			ret = snprintf(buf, 64, "%s\n", name);
		else
			ret = snprintf(buf, 64, "Invalid (%02X)\n",
				       pdev_info->dev_state);
	}
	return ret;
}

static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	enum myrs_devstate new_state;
	unsigned short ldev_num;
	unsigned char status;

	if (!strncmp(buf, "offline", 7) ||
	    !strncmp(buf, "kill", 4))
		new_state = MYRS_DEVICE_OFFLINE;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRS_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRS_DEVICE_STANDBY;
	else
		return -EINVAL;

	if (sdev->channel < cs->ctlr_info->physchan_present) {
		struct myrs_pdev_info *pdev_info = sdev->hostdata;
		struct myrs_devmap *pdev_devmap =
			(struct myrs_devmap *)&pdev_info->rsvd13;

		if (pdev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
					     sdev->lun, pdev_devmap);
		if (status != MYRS_STATUS_SUCCESS)
			return -ENXIO;
		ldev_num = pdev_devmap->ldev_num;
	} else {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;

		if (ldev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		ldev_num = ldev_info->ldev_num;
	}
	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
	mbox->set_devstate.state = new_state;
	mbox->set_devstate.ldev.ldev_num = ldev_num;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status == MYRS_STATUS_SUCCESS) {
		if (sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			pdev_info->dev_state = new_state;
		} else {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			ldev_info->dev_state = new_state;
		}
		sdev_printk(KERN_INFO, sdev,
			    "Set device state to %s\n",
			    myrs_devstate_name(new_state));
		return count;
	}
	sdev_printk(KERN_INFO, sdev,
		    "Failed to set device state to %s, status 0x%02x\n",
		    myrs_devstate_name(new_state), status);
	return -EINVAL;
}
static DEVICE_ATTR_RW(raid_state);
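
/*
 * Example (illustrative; the sysfs path depends on the SCSI address of
 * the device):
 *
 *	# cat /sys/class/scsi_device/1:0:2:0/device/raid_state
 *	# echo offline > /sys/class/scsi_device/1:0:2:0/device/raid_state
 *
 * Accepted writes are "online", "offline" (or "kill"), and "standby".
 */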

static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	const char *name = NULL;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info;

		ldev_info = sdev->hostdata;
		name = myrs_raid_level_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 64, "Invalid (%02X)\n",
					ldev_info->raid_level);
	} else
		name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);

	return snprintf(buf, 64, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);

static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 64, "physical device - not rebuilding\n");

	ldev_info = sdev->hostdata;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (ldev_info->rbld_active) {
		return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
				(size_t)ldev_info->rbld_lba,
				(size_t)ldev_info->cfg_devsize);
	} else
		return snprintf(buf, 64, "not rebuilding\n");
}

static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int rebuild, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &rebuild);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}

	if (rebuild && ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!rebuild && !ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled; no rebuild in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (rebuild) {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
	} else {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not %s, status 0x%02x\n",
			    rebuild ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    rebuild ? "Initiated" : "Cancelled");
		ret = count;
	}
	return ret;
}
static DEVICE_ATTR_RW(rebuild);
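
/*
 * Example (illustrative path): a non-zero write starts a rebuild of
 * the logical drive, a zero write cancels one in progress; reads then
 * report progress in "rebuilding block %zu of %zu" form:
 *
 *	# echo 1 > /sys/class/scsi_device/1:0:2:0/device/rebuild
 */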

static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 64, "physical device - not checking\n");

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;
	myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (ldev_info->cc_active)
		return snprintf(buf, 64, "checking block %zu of %zu\n",
				(size_t)ldev_info->cc_lba,
				(size_t)ldev_info->cfg_devsize);
	else
		return snprintf(buf, 64, "not checking\n");
}

static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int check, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &check);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (check && ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!check && !ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Cancelled; check not in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (check) {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
		mbox->cc.restore_consistency = true;
		mbox->cc.initialized_area_only = false;
	} else {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not %s, status 0x%02x\n",
			    check ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
			    check ? "Initiated" : "Cancelled");
		ret = count;
	}
	return ret;
}
static DEVICE_ATTR_RW(consistency_check);

static struct attribute *myrs_sdev_attrs[] = {
	&dev_attr_consistency_check.attr,
	&dev_attr_rebuild.attr,
	&dev_attr_raid_state.attr,
	&dev_attr_raid_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(myrs_sdev);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	char serial[17];

	memcpy(serial, cs->ctlr_info->serial_number, 16);
	serial[16] = '\0';
	return snprintf(buf, 16, "%s\n", serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 20, "%d\n", cs->host->host_no);
}
static DEVICE_ATTR_RO(ctlr_num);

static struct myrs_cpu_type_tbl {
	enum myrs_cpu_type type;
	char *name;
} myrs_cpu_type_names[] = {
	{ MYRS_CPUTYPE_i960CA,		"i960CA" },
	{ MYRS_CPUTYPE_i960RD,		"i960RD" },
	{ MYRS_CPUTYPE_i960RN,		"i960RN" },
	{ MYRS_CPUTYPE_i960RP,		"i960RP" },
	{ MYRS_CPUTYPE_NorthBay,	"NorthBay" },
	{ MYRS_CPUTYPE_StrongArm,	"StrongARM" },
	{ MYRS_CPUTYPE_i960RM,		"i960RM" },
};

static ssize_t processor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cpu_type_tbl *tbl;
	const char *first_processor = NULL;
	const char *second_processor = NULL;
	struct myrs_ctlr_info *info = cs->ctlr_info;
	ssize_t ret;
	int i;

	if (info->cpu[0].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[0].cpu_type) {
				first_processor = tbl[i].name;
				break;
			}
		}
	}
	if (info->cpu[1].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[1].cpu_type) {
				second_processor = tbl[i].name;
				break;
			}
		}
	}
	if (first_processor && second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
			       "2: %s (%s, %d cpus)\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count,
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else if (first_processor && !second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count);
	else if (!first_processor && second_processor)
		ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else
		ret = snprintf(buf, 64, "1: absent\n2: absent\n");

	return ret;
}
static DEVICE_ATTR_RO(processor);

static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 28, "%s\n", cs->model_name);
}
static DEVICE_ATTR_RO(model);

static ssize_t ctlr_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
}
static DEVICE_ATTR_RO(ctlr_type);

static ssize_t cache_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
}
static DEVICE_ATTR_RO(cache_size);

static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 16, "%d.%02d-%02d\n",
			cs->ctlr_info->fw_major_version,
			cs->ctlr_info->fw_minor_version,
			cs->ctlr_info->fw_turn_number);
}
static DEVICE_ATTR_RO(firmware);

static ssize_t discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Discovery Not Initiated, status %02X\n",
			     status);
		return -EINVAL;
	}
	shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
	cs->next_evseq = 0;
	cs->needs_update = true;
	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
	flush_delayed_work(&cs->monitor_work);
	shost_printk(KERN_INFO, shost, "Discovery Completed\n");

	return count;
}
static DEVICE_ATTR_WO(discovery);
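
/*
 * discovery is a write-only host attribute; the written value itself is
 * not parsed, any write triggers a controller rescan followed by a
 * synchronous monitor pass, e.g. (host number is illustrative):
 *
 *   echo 1 > /sys/class/scsi_host/host2/discovery
 */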

static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	unsigned char status;

	status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
			     MYRS_RAID_CONTROLLER);
	if (status == MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush failed, status 0x%02x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);

static ssize_t disable_enclosure_messages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
}

static ssize_t disable_enclosure_messages_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int value, ret;

	ret = kstrtoint(buf, 0, &value);
	if (ret)
		return ret;
	if (value > 2)
		return -EINVAL;

	cs->disable_enc_msg = value;
	return count;
}
static DEVICE_ATTR_RW(disable_enclosure_messages);

static struct attribute *myrs_shost_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_ctlr_num.attr,
	&dev_attr_processor.attr,
	&dev_attr_model.attr,
	&dev_attr_ctlr_type.attr,
	&dev_attr_cache_size.attr,
	&dev_attr_firmware.attr,
	&dev_attr_discovery.attr,
	&dev_attr_flush_cache.attr,
	&dev_attr_disable_enclosure_messages.attr,
	NULL,
};

ATTRIBUTE_GROUPS(myrs_shost);

/*
 * SCSI midlayer interface
 */
static int myrs_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrs_hba *cs = shost_priv(shost);

	cs->reset(cs->io_base);
	return SUCCESS;
}
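
/*
 * For logical devices the controller does not service MODE SENSE itself,
 * so myrs_mode_sense() below synthesizes a MODE SENSE(6) reply carrying
 * the caching mode page (0x08) from the cached logical device state:
 * the RCD/WCE bits mirror the controller's read/write cache settings,
 * and when the DBD bit is clear a block descriptor advertising the
 * configured device size is included.
 */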

static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
		struct myrs_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	modes[2] = 0x10; /* Enable FUA */
	if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
		modes[2] |= 0x80;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
		put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
		mode_pg[2] |= 0x01;
	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
		mode_pg[2] |= 0x04;
	if (ldev_info->cacheline_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(1 << ldev_info->cacheline_size,
				   &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static int myrs_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	struct scsi_device *sdev = scmd->device;
	union myrs_sgl *hw_sge;
	dma_addr_t sense_addr;
	struct scatterlist *sgl;
	unsigned long flags, timeout;
	int nsge;

	if (!scmd->device->hostdata) {
		scmd->result = (DID_NO_CONNECT << 16);
		scsi_done(scmd);
		return 0;
	}

	switch (scmd->cmnd[0]) {
	case REPORT_LUNS:
		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
		scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
			    (scmd->cmnd[2] & 0x3F) != 0x08) {
				/* Illegal request, invalid field in CDB */
				scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			} else {
				myrs_mode_sense(cs, scmd, ldev_info);
				scmd->result = (DID_OK << 16);
			}
			scsi_done(scmd);
			return 0;
		}
		break;
	}

	myrs_reset_cmd(cmd_blk);
	cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
					&sense_addr);
	if (!cmd_blk->sense)
		return SCSI_MLQUEUE_HOST_BUSY;
	cmd_blk->sense_addr = sense_addr;
	timeout = rq->timeout;
	if (scmd->cmd_len <= 10) {
		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
			mbox->SCSI_10.pdev.lun = ldev_info->lun;
			mbox->SCSI_10.pdev.target = ldev_info->target;
			mbox->SCSI_10.pdev.channel = ldev_info->channel;
			mbox->SCSI_10.pdev.ctlr = 0;
		} else {
			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
			mbox->SCSI_10.pdev.lun = sdev->lun;
			mbox->SCSI_10.pdev.target = sdev->id;
			mbox->SCSI_10.pdev.channel = sdev->channel;
		}
		mbox->SCSI_10.id = rq->tag + 3;
		mbox->SCSI_10.control.dma_ctrl_to_host =
			(scmd->sc_data_direction == DMA_FROM_DEVICE);
		if (rq->cmd_flags & REQ_FUA)
			mbox->SCSI_10.control.fua = true;
		mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
		mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
		mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
		mbox->SCSI_10.cdb_len = scmd->cmd_len;
		if (timeout > 60) {
			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
			mbox->SCSI_10.tmo.tmo_val = timeout / 60;
		} else {
			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
			mbox->SCSI_10.tmo.tmo_val = timeout;
		}
		memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
		hw_sge = &mbox->SCSI_10.dma_addr;
		cmd_blk->dcdb = NULL;
	} else {
		dma_addr_t dcdb_dma;

		cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
					       &dcdb_dma);
		if (!cmd_blk->dcdb) {
			dma_pool_free(cs->sense_pool, cmd_blk->sense,
				      cmd_blk->sense_addr);
			cmd_blk->sense = NULL;
			cmd_blk->sense_addr = 0;
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		cmd_blk->dcdb_dma = dcdb_dma;
		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
			mbox->SCSI_255.pdev.lun = ldev_info->lun;
			mbox->SCSI_255.pdev.target = ldev_info->target;
			mbox->SCSI_255.pdev.channel = ldev_info->channel;
			mbox->SCSI_255.pdev.ctlr = 0;
		} else {
			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
			mbox->SCSI_255.pdev.lun = sdev->lun;
			mbox->SCSI_255.pdev.target = sdev->id;
			mbox->SCSI_255.pdev.channel = sdev->channel;
		}
		mbox->SCSI_255.id = rq->tag + 3;
		mbox->SCSI_255.control.dma_ctrl_to_host =
			(scmd->sc_data_direction == DMA_FROM_DEVICE);
		if (rq->cmd_flags & REQ_FUA)
			mbox->SCSI_255.control.fua = true;
		mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
		mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
		mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
		mbox->SCSI_255.cdb_len = scmd->cmd_len;
		mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
		if (timeout > 60) {
			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
			mbox->SCSI_255.tmo.tmo_val = timeout / 60;
		} else {
			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
			mbox->SCSI_255.tmo.tmo_val = timeout;
		}
		memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
		hw_sge = &mbox->SCSI_255.dma_addr;
	}
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
		hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
	} else {
		struct myrs_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		if (nsge > 2) {
			hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
						&hw_sgl_addr);
			if (WARN_ON(!hw_sgl)) {
				if (cmd_blk->dcdb) {
					dma_pool_free(cs->dcdb_pool,
						      cmd_blk->dcdb,
						      cmd_blk->dcdb_dma);
					cmd_blk->dcdb = NULL;
					cmd_blk->dcdb_dma = 0;
				}
				dma_pool_free(cs->sense_pool,
					      cmd_blk->sense,
					      cmd_blk->sense_addr);
				cmd_blk->sense = NULL;
				cmd_blk->sense_addr = 0;
				return SCSI_MLQUEUE_HOST_BUSY;
			}
			cmd_blk->sgl = hw_sgl;
			cmd_blk->sgl_addr = hw_sgl_addr;
			if (scmd->cmd_len <= 10)
				mbox->SCSI_10.control.add_sge_mem = true;
			else
				mbox->SCSI_255.control.add_sge_mem = true;
			hw_sge->ext.sge0_len = nsge;
			hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
		} else
			hw_sgl = hw_sge->sge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			if (WARN_ON(!hw_sgl)) {
				scsi_dma_unmap(scmd);
				scmd->result = (DID_ERROR << 16);
				scsi_done(scmd);
				return 0;
			}
			hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
			hw_sgl->sge_count = (u64)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	return 0;
}
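
/*
 * Logical devices are presented on the channels above the physical ones,
 * and the logical device number is derived from the SCSI address. For
 * example (values illustrative), with two physical channels and a
 * max_id of 128, sdev 2:0:5:0 maps to logical device
 * (2 - 2) * 128 + 5 = 5, and 3:0:5:0 to (3 - 2) * 128 + 5 = 133.
 */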

static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
		struct scsi_device *sdev)
{
	unsigned short ldev_num;
	unsigned int chan_offset =
		sdev->channel - cs->ctlr_info->physchan_present;

	ldev_num = sdev->id + chan_offset * sdev->host->max_id;

	return ldev_num;
}

static int myrs_slave_alloc(struct scsi_device *sdev)
{
	struct myrs_hba *cs = shost_priv(sdev->host);
	unsigned char status;

	if (sdev->channel > sdev->host->max_channel)
		return 0;

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info;
		unsigned short ldev_num;

		if (sdev->lun > 0)
			return -ENXIO;

		ldev_num = myrs_translate_ldev(cs, sdev);

		ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
		if (!ldev_info)
			return -ENOMEM;

		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
		if (status != MYRS_STATUS_SUCCESS) {
			sdev->hostdata = NULL;
			kfree(ldev_info);
		} else {
			enum raid_level level;

			dev_dbg(&sdev->sdev_gendev,
				"Logical device mapping %d:%d:%d -> %d\n",
				ldev_info->channel, ldev_info->target,
				ldev_info->lun, ldev_info->ldev_num);

			sdev->hostdata = ldev_info;
			switch (ldev_info->raid_level) {
			case MYRS_RAID_LEVEL0:
				level = RAID_LEVEL_LINEAR;
				break;
			case MYRS_RAID_LEVEL1:
				level = RAID_LEVEL_1;
				break;
			case MYRS_RAID_LEVEL3:
			case MYRS_RAID_LEVEL3F:
			case MYRS_RAID_LEVEL3L:
				level = RAID_LEVEL_3;
				break;
			case MYRS_RAID_LEVEL5:
			case MYRS_RAID_LEVEL5L:
				level = RAID_LEVEL_5;
				break;
			case MYRS_RAID_LEVEL6:
				level = RAID_LEVEL_6;
				break;
			case MYRS_RAID_LEVELE:
			case MYRS_RAID_NEWSPAN:
			case MYRS_RAID_SPAN:
				level = RAID_LEVEL_LINEAR;
				break;
			case MYRS_RAID_JBOD:
				level = RAID_LEVEL_JBOD;
				break;
			default:
				level = RAID_LEVEL_UNKNOWN;
				break;
			}
			raid_set_level(myrs_raid_template,
				       &sdev->sdev_gendev, level);
			if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
				const char *name;

				name = myrs_devstate_name(ldev_info->dev_state);
				sdev_printk(KERN_DEBUG, sdev,
					    "logical device in state %s\n",
					    name ? name : "Invalid");
			}
		}
	} else {
		struct myrs_pdev_info *pdev_info;

		pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
		if (!pdev_info)
			return -ENOMEM;

		status = myrs_get_pdev_info(cs, sdev->channel,
					    sdev->id, sdev->lun,
					    pdev_info);
		if (status != MYRS_STATUS_SUCCESS) {
			sdev->hostdata = NULL;
			kfree(pdev_info);
			return -ENXIO;
		}
		sdev->hostdata = pdev_info;
	}
	return 0;
}

static int myrs_slave_configure(struct scsi_device *sdev)
{
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;

	if (sdev->channel > sdev->host->max_channel)
		return -ENXIO;

	if (sdev->channel < cs->ctlr_info->physchan_present) {
		/* Skip HBA device */
		if (sdev->type == TYPE_RAID)
			return -ENXIO;
		sdev->no_uld_attach = 1;
		return 0;
	}

	if (sdev->lun != 0)
		return -ENXIO;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
		sdev->wce_default_on = 1;
	sdev->tagged_supported = 1;
	return 0;
}

static void myrs_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}

static const struct scsi_host_template myrs_template = {
	.module			= THIS_MODULE,
	.name			= "DAC960",
	.proc_name		= "myrs",
	.queuecommand		= myrs_queuecommand,
	.eh_host_reset_handler	= myrs_host_reset,
	.slave_alloc		= myrs_slave_alloc,
	.slave_configure	= myrs_slave_configure,
	.slave_destroy		= myrs_slave_destroy,
	.cmd_size		= sizeof(struct myrs_cmdblk),
	.shost_groups		= myrs_shost_groups,
	.sdev_groups		= myrs_sdev_groups,
	.this_id		= -1,
};

static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct Scsi_Host *shost;
	struct myrs_hba *cs;

	shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
	if (!shost)
		return NULL;

	shost->max_cmd_len = 16;
	shost->max_lun = 256;
	cs = shost_priv(shost);
	mutex_init(&cs->dcmd_mutex);
	mutex_init(&cs->cinfo_mutex);
	cs->host = shost;

	return cs;
}

/*
 * RAID template functions
 */

/**
 * myrs_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int
myrs_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);

	return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
}

/**
 * myrs_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void
myrs_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info = sdev->hostdata;
	u64 percent_complete = 0;

	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
		return;
	if (ldev_info->rbld_active) {
		unsigned short ldev_num = ldev_info->ldev_num;

		myrs_get_ldev_info(cs, ldev_num, ldev_info);
		percent_complete = ldev_info->rbld_lba * 100;
		do_div(percent_complete, ldev_info->cfg_devsize);
	}
	raid_set_resync(myrs_raid_template, dev, percent_complete);
}

/**
 * myrs_get_state - get raid volume status
 * @dev: the device struct object
 */
static void
myrs_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info = sdev->hostdata;
	enum raid_state state = RAID_STATE_UNKNOWN;

	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
		state = RAID_STATE_UNKNOWN;
	else {
		switch (ldev_info->dev_state) {
		case MYRS_DEVICE_ONLINE:
			state = RAID_STATE_ACTIVE;
			break;
		case MYRS_DEVICE_SUSPECTED_CRITICAL:
		case MYRS_DEVICE_CRITICAL:
			state = RAID_STATE_DEGRADED;
			break;
		case MYRS_DEVICE_REBUILD:
			state = RAID_STATE_RESYNCING;
			break;
		case MYRS_DEVICE_UNCONFIGURED:
		case MYRS_DEVICE_INVALID_STATE:
			state = RAID_STATE_UNKNOWN;
			break;
		default:
			state = RAID_STATE_OFFLINE;
		}
	}
	raid_set_state(myrs_raid_template, dev, state);
}

static struct raid_function_template myrs_raid_functions = {
	.cookie		= &myrs_template,
	.is_raid	= myrs_is_raid,
	.get_resync	= myrs_get_resync,
	.get_state	= myrs_get_state,
};

/*
 * PCI interface functions
 */
static void myrs_flush_cache(struct myrs_hba *cs)
{
	myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
}

static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned char status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);
	status = cmd_blk->status;
	if (cmd_blk->sense) {
		if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
			unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;

			if (sense_len > cmd_blk->sense_len)
				sense_len = cmd_blk->sense_len;
			memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
		}
		dma_pool_free(cs->sense_pool, cmd_blk->sense,
			      cmd_blk->sense_addr);
		cmd_blk->sense = NULL;
		cmd_blk->sense_addr = 0;
	}
	if (cmd_blk->dcdb) {
		dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_dma);
		cmd_blk->dcdb = NULL;
		cmd_blk->dcdb_dma = 0;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cs->sg_pool, cmd_blk->sgl,
			      cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	if (cmd_blk->residual)
		scsi_set_resid(scmd, cmd_blk->residual);
	if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
	    status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
		scmd->result = (DID_BAD_TARGET << 16);
	else
		scmd->result = (DID_OK << 16) | status;
	scsi_done(scmd);
}

static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	if (!cmd_blk)
		return;

	if (cmd_blk->complete) {
		complete(cmd_blk->complete);
		cmd_blk->complete = NULL;
	}
}
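
/*
 * The monitor work item below polls the controller: it reads the
 * firmware status buffer, fetches and logs any pending events,
 * refreshes logical device information while background operations
 * (initialization, rebuild, consistency check, expansion) are active,
 * and then re-arms itself; with a one-jiffy delay while events are
 * still queued, otherwise at the primary or secondary monitoring
 * interval depending on whether anything changed.
 */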

static void myrs_monitor(struct work_struct *work)
{
	struct myrs_hba *cs = container_of(work, struct myrs_hba,
					   monitor_work.work);
	struct Scsi_Host *shost = cs->host;
	struct myrs_ctlr_info *info = cs->ctlr_info;
	unsigned int epoch = cs->fwstat_buf->epoch;
	unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
	unsigned char status;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	status = myrs_get_fwstatus(cs);

	if (cs->needs_update) {
		cs->needs_update = false;
		mutex_lock(&cs->cinfo_mutex);
		status = myrs_get_ctlr_info(cs);
		mutex_unlock(&cs->cinfo_mutex);
	}
	if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
		status = myrs_get_event(cs, cs->next_evseq,
					cs->event_buf);
		if (status == MYRS_STATUS_SUCCESS) {
			myrs_log_event(cs, cs->event_buf);
			cs->next_evseq++;
			interval = 1;
		}
	}

	if (time_after(jiffies, cs->secondary_monitor_time
		       + MYRS_SECONDARY_MONITOR_INTERVAL))
		cs->secondary_monitor_time = jiffies;

	if (info->bg_init_active +
	    info->ldev_init_active +
	    info->pdev_init_active +
	    info->cc_active +
	    info->rbld_active +
	    info->exp_active != 0) {
		struct scsi_device *sdev;

		shost_for_each_device(sdev, shost) {
			struct myrs_ldev_info *ldev_info;
			int ldev_num;

			if (sdev->channel < info->physchan_present)
				continue;
			ldev_info = sdev->hostdata;
			if (!ldev_info)
				continue;
			ldev_num = ldev_info->ldev_num;
			myrs_get_ldev_info(cs, ldev_num, ldev_info);
		}
		cs->needs_update = true;
	}
	if (epoch == cs->epoch &&
	    cs->fwstat_buf->next_evseq == cs->next_evseq &&
	    (cs->needs_update == false ||
	     time_before(jiffies, cs->primary_monitor_time
			 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
		interval = MYRS_SECONDARY_MONITOR_INTERVAL;
	}

	if (interval > 1)
		cs->primary_monitor_time = jiffies;
	queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
}

static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
{
	struct Scsi_Host *shost = cs->host;
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrs_sge);
	elem_size = shost->sg_tablesize * elem_align;
	cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cs->sg_pool == NULL) {
		shost_printk(KERN_ERR, shost,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
					 MYRS_SENSE_SIZE, sizeof(int), 0);
	if (cs->sense_pool == NULL) {
		dma_pool_destroy(cs->sg_pool);
		cs->sg_pool = NULL;
		shost_printk(KERN_ERR, shost,
			     "Failed to allocate sense data pool\n");
		return false;
	}

	cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
					MYRS_DCDB_SIZE,
					sizeof(unsigned char), 0);
	if (!cs->dcdb_pool) {
		dma_pool_destroy(cs->sg_pool);
		cs->sg_pool = NULL;
		dma_pool_destroy(cs->sense_pool);
		cs->sense_pool = NULL;
		shost_printk(KERN_ERR, shost,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	cs->work_q = alloc_ordered_workqueue("myrs_wq_%d", WQ_MEM_RECLAIM,
					     shost->host_no);
	if (!cs->work_q) {
		dma_pool_destroy(cs->dcdb_pool);
		cs->dcdb_pool = NULL;
		dma_pool_destroy(cs->sg_pool);
		cs->sg_pool = NULL;
		dma_pool_destroy(cs->sense_pool);
		cs->sense_pool = NULL;
		shost_printk(KERN_ERR, shost,
			     "Failed to create workqueue\n");
		return false;
	}

	/* Initialize the Monitoring Timer. */
	INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);

	return true;
}

static void myrs_destroy_mempools(struct myrs_hba *cs)
{
	cancel_delayed_work_sync(&cs->monitor_work);
	destroy_workqueue(cs->work_q);

	dma_pool_destroy(cs->sg_pool);
	dma_pool_destroy(cs->dcdb_pool);
	dma_pool_destroy(cs->sense_pool);
}

static void myrs_unmap(struct myrs_hba *cs)
{
	kfree(cs->event_buf);
	kfree(cs->ctlr_info);
	if (cs->fwstat_buf) {
		dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
				  cs->fwstat_buf, cs->fwstat_addr);
		cs->fwstat_buf = NULL;
	}
	if (cs->first_stat_mbox) {
		dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
				  cs->first_stat_mbox, cs->stat_mbox_addr);
		cs->first_stat_mbox = NULL;
	}
	if (cs->first_cmd_mbox) {
		dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
				  cs->first_cmd_mbox, cs->cmd_mbox_addr);
		cs->first_cmd_mbox = NULL;
	}
}

static void myrs_cleanup(struct myrs_hba *cs)
{
	struct pci_dev *pdev = cs->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrs_unmap(cs);

	if (cs->mmio_base) {
		if (cs->disable_intr)
			cs->disable_intr(cs->io_base);
		iounmap(cs->mmio_base);
		cs->mmio_base = NULL;
	}
	if (cs->irq)
		free_irq(cs->irq, cs);
	if (cs->io_addr)
		release_region(cs->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cs->host);
}

static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrs_privdata *privdata =
		(struct myrs_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct myrs_hba *cs = NULL;

	cs = myrs_alloc_host(pdev, entry);
	if (!cs) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	cs->pdev = pdev;

	if (pci_enable_device(pdev))
		goto Failure;

	cs->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cs);
	spin_lock_init(&cs->queue_lock);
	/* Map the Controller Register Window. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
	if (cs->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto Failure;
	}

	cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cs, cs->io_base))
		goto Failure;

	/* Acquire shared access to the IRQ Channel. */
	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto Failure;
	}
	cs->irq = pdev->irq;
	return cs;

Failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrs_cleanup(cs);
	return NULL;
}

/*
 * myrs_err_status reports Controller BIOS Messages passed through
 * the Error Status Register when the driver performs the BIOS handshaking.
 * It returns true for fatal errors and false otherwise.
 */
static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cs->pdev;

	switch (status) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			status);
		return true;
	}
	return false;
}

/*
 * Hardware-specific functions
 */

/*
 * DAC960 GEM Series Controllers.
 */
static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}

static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);

	writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
}

static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}

static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}

static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
}

static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);

	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}

static inline void DAC960_GEM_ack_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
				  DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);

	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}

static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_GEM_enable_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
				  DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);

	writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
}

static inline void DAC960_GEM_disable_intr(void __iomem *base)
{
	__le32 val = 0;

	writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
}

static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}

static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
}

static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
}

static inline bool
DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	__le32 val;

	val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
	if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
		return false;
	*error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
	*param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
	writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
	return true;
}

static inline unsigned char
DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_GEM_hw_mbox_is_full(base))
		udelay(1);
	DAC960_GEM_write_hw_mbox(base, mbox_addr);
	DAC960_GEM_hw_mbox_new_cmd(base);
	while (!DAC960_GEM_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_GEM_read_cmd_status(base);
	DAC960_GEM_ack_hw_mbox_intr(base);
	DAC960_GEM_ack_hw_mbox_status(base);

	return status;
}

static int DAC960_GEM_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_GEM_disable_intr(base);
	DAC960_GEM_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_GEM_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		if (DAC960_GEM_read_error_status(base, &status,
						 &parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_GEM_reset_ctrl(base);
		return -EAGAIN;
	}
	DAC960_GEM_enable_intr(base);
	cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_GEM_disable_intr;
	cs->reset = DAC960_GEM_reset_ctrl;
	return 0;
}
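
/*
 * Completion path: each status mailbox entry carries the command id that
 * was written into the command mailbox at submission time. Ids below 3
 * are the driver's internal direct-command (MYRS_DCMD_TAG) and monitor
 * (MYRS_MCMD_TAG) command blocks; higher ids were derived from the block
 * layer tag (rq->tag + 3 in myrs_queuecommand) and are mapped back to
 * the scsi_cmnd via scsi_host_find_tag().
 */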

static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_GEM_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}

static struct myrs_privdata DAC960_GEM_privdata = {
	.hw_init =	DAC960_GEM_hw_init,
	.irq_handler =	DAC960_GEM_intr_handler,
	.mmio_size =	DAC960_GEM_mmio_size,
};

/*
 * DAC960 BA Series Controllers.
 */
static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
}

static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
}

static inline void DAC960_BA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
}

static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
}

static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_IDB_OFFSET);
	return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_BA_init_in_progress(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_IDB_OFFSET);
	return !(val & DAC960_BA_IDB_INIT_DONE);
}

static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
}

static inline void DAC960_BA_ack_intr(void __iomem *base)
{
	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_BA_ODB_OFFSET);
}

static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_ODB_OFFSET);
	return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_BA_enable_intr(void __iomem *base)
{
	writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
}

static inline void DAC960_BA_disable_intr(void __iomem *base)
{
	writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
}

static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}

static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
}

static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
}

static inline bool
DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	u8 val;

	val = readb(base + DAC960_BA_ERRSTS_OFFSET);
	if (!(val & DAC960_BA_ERRSTS_PENDING))
		return false;
	val &= ~DAC960_BA_ERRSTS_PENDING;
	*error = val;
	*param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
	writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
	return true;
}

static inline unsigned char
DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_BA_hw_mbox_is_full(base))
		udelay(1);
	DAC960_BA_write_hw_mbox(base, mbox_addr);
	DAC960_BA_hw_mbox_new_cmd(base);
	while (!DAC960_BA_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_BA_read_cmd_status(base);
	DAC960_BA_ack_hw_mbox_intr(base);
	DAC960_BA_ack_hw_mbox_status(base);

	return status;
}

static int DAC960_BA_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_BA_disable_intr(base);
	DAC960_BA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_BA_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		if (DAC960_BA_read_error_status(base, &status,
						&parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_BA_reset_ctrl(base);
		return -EAGAIN;
	}
	DAC960_BA_enable_intr(base);
	cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_BA_disable_intr;
	cs->reset = DAC960_BA_reset_ctrl;
	return 0;
}

static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_BA_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}

static struct myrs_privdata DAC960_BA_privdata = {
	.hw_init =	DAC960_BA_hw_init,
	.irq_handler =	DAC960_BA_intr_handler,
	.mmio_size =	DAC960_BA_mmio_size,
};
  2543. /*
  2544. * DAC960 LP Series Controllers.
  2545. */
  2546. static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
  2547. {
  2548. writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
  2549. }
  2550. static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
  2551. {
  2552. writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
  2553. }
  2554. static inline void DAC960_LP_reset_ctrl(void __iomem *base)
  2555. {
  2556. writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
  2557. }
  2558. static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
  2559. {
  2560. writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
  2561. }
  2562. static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
  2563. {
  2564. u8 val;
  2565. val = readb(base + DAC960_LP_IDB_OFFSET);
  2566. return val & DAC960_LP_IDB_HWMBOX_FULL;
  2567. }
  2568. static inline bool DAC960_LP_init_in_progress(void __iomem *base)
  2569. {
  2570. u8 val;
  2571. val = readb(base + DAC960_LP_IDB_OFFSET);
  2572. return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
  2573. }
  2574. static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
  2575. {
  2576. writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
  2577. }
  2578. static inline void DAC960_LP_ack_intr(void __iomem *base)
  2579. {
  2580. writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
  2581. base + DAC960_LP_ODB_OFFSET);
  2582. }
  2583. static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
  2584. {
  2585. u8 val;
  2586. val = readb(base + DAC960_LP_ODB_OFFSET);
  2587. return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
  2588. }
  2589. static inline void DAC960_LP_enable_intr(void __iomem *base)
  2590. {
  2591. writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
  2592. }
  2593. static inline void DAC960_LP_disable_intr(void __iomem *base)
  2594. {
  2595. writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
  2596. }
  2597. static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
  2598. union myrs_cmd_mbox *mbox)
  2599. {
  2600. memcpy(&mem_mbox->words[1], &mbox->words[1],
  2601. sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
  2602. /* Barrier to avoid reordering */
  2603. wmb();
  2604. mem_mbox->words[0] = mbox->words[0];
  2605. /* Barrier to force PCI access */
  2606. mb();
  2607. }
  2608. static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
  2609. dma_addr_t cmd_mbox_addr)
  2610. {
  2611. dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
  2612. }
  2613. static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
  2614. {
  2615. return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
  2616. }
  2617. static inline bool
  2618. DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
  2619. unsigned char *param0, unsigned char *param1)
  2620. {
  2621. u8 val;
  2622. val = readb(base + DAC960_LP_ERRSTS_OFFSET);
  2623. if (!(val & DAC960_LP_ERRSTS_PENDING))
  2624. return false;
  2625. val &= ~DAC960_LP_ERRSTS_PENDING;
  2626. *error = val;
  2627. *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
  2628. *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
  2629. writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
  2630. return true;
  2631. }
  2632. static inline unsigned char
  2633. DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
  2634. {
  2635. unsigned char status;
  2636. while (DAC960_LP_hw_mbox_is_full(base))
  2637. udelay(1);
  2638. DAC960_LP_write_hw_mbox(base, mbox_addr);
  2639. DAC960_LP_hw_mbox_new_cmd(base);
  2640. while (!DAC960_LP_hw_mbox_status_available(base))
  2641. udelay(1);
  2642. status = DAC960_LP_read_cmd_status(base);
  2643. DAC960_LP_ack_hw_mbox_intr(base);
  2644. DAC960_LP_ack_hw_mbox_status(base);
  2645. return status;
  2646. }
static int DAC960_LP_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_LP_disable_intr(base);
	DAC960_LP_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LP_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		if (DAC960_LP_read_error_status(base, &status,
						&parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LP_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LP_enable_intr(base);
	cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_LP_disable_intr;
	cs->reset = DAC960_LP_reset_ctrl;
	return 0;
}
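
/*
 * Interrupt handler: walk the status mailbox ring until a zero id
 * terminates it.  The DCMD/MCMD tags identify the driver's two
 * internal command blocks; any other id maps back to a SCSI command
 * via its block tag, which is offset by 3.  Each consumed slot is
 * cleared, and the ring pointer wraps after the last mailbox.
 */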
static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_LP_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}
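
/* Controller-specific methods for the DAC960 LP family */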
static struct myrs_privdata DAC960_LP_privdata = {
	.hw_init = DAC960_LP_hw_init,
	.irq_handler = DAC960_LP_intr_handler,
	.mmio_size = DAC960_LP_mmio_size,
};

/*
 * Module functions
 */
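
/*
 * Probe: detect the controller, read its configuration, set up the
 * mempools, and register the SCSI host.  Failures after detection
 * all funnel through myrs_cleanup().
 */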
static int
myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrs_hba *cs;
	int ret;

	cs = myrs_detect(dev, entry);
	if (!cs)
		return -ENODEV;

	ret = myrs_get_config(cs);
	if (ret < 0) {
		myrs_cleanup(cs);
		return ret;
	}

	if (!myrs_create_mempools(dev, cs)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cs->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrs_destroy_mempools(cs);
		goto failed;
	}
	scsi_scan_host(cs->host);
	return 0;
failed:
	myrs_cleanup(cs);
	return ret;
}
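
/* Flush the controller cache before tearing the adapter down */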
static void myrs_remove(struct pci_dev *pdev)
{
	struct myrs_hba *cs = pci_get_drvdata(pdev);

	if (cs == NULL)
		return;

	shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
	myrs_flush_cache(cs);
	myrs_destroy_mempools(cs);
	myrs_cleanup(cs);
}
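
/*
 * The GEM entry needs an explicit subvendor match; the BA and LP
 * variants match on the plain vendor/device pair.
 */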
static const struct pci_device_id myrs_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_GEM,
			       PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
		.driver_data = (unsigned long) &DAC960_GEM_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrs_id_table);

static struct pci_driver myrs_pci_driver = {
	.name = "myrs",
	.id_table = myrs_id_table,
	.probe = myrs_probe,
	.remove = myrs_remove,
};
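
/*
 * Attach the RAID class template before registering the PCI driver,
 * so it is in place before any device is probed; drop it again if
 * registration fails.
 */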
static int __init myrs_init_module(void)
{
	int ret;

	myrs_raid_template = raid_class_attach(&myrs_raid_functions);
	if (!myrs_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrs_pci_driver);
	if (ret)
		raid_class_release(myrs_raid_template);

	return ret;
}

static void __exit myrs_cleanup_module(void)
{
	pci_unregister_driver(&myrs_pci_driver);
	raid_class_release(myrs_raid_template);
}

module_init(myrs_init_module);
module_exit(myrs_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");