  1. /*
  2. * linux/drivers/scsi/esas2r/esas2r_init.c
  3. * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
  4. *
  5. * Copyright (c) 2001-2013 ATTO Technology, Inc.
  6. * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version 2
  11. * of the License, or (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * NO WARRANTY
  19. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  20. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  21. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  22. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  23. * solely responsible for determining the appropriateness of using and
  24. * distributing the Program and assumes all risks associated with its
  25. * exercise of rights under this Agreement, including but not limited to
  26. * the risks and costs of program errors, damage to or loss of data,
  27. * programs or equipment, and unavailability or interruption of operations.
  28. *
  29. * DISCLAIMER OF LIABILITY
  30. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  31. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  32. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  33. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  34. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  35. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  36. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  37. *
  38. * You should have received a copy of the GNU General Public License
  39. * along with this program; if not, write to the Free Software
  40. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
  41. * USA.
  42. */
#include "esas2r.h"
/*
 * Allocate a DMA-coherent buffer described by @mem_desc, over-allocating
 * by @align bytes so the returned addresses can be rounded up to the
 * requested alignment.  On success, virt_addr/phys_addr hold the aligned
 * addresses, while esas2r_data/esas2r_param keep the original pointer and
 * size needed by esas2r_initmem_free() to release the allocation.
 * Returns false if the DMA allocation fails.
 */
static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
				 struct esas2r_mem_desc *mem_desc,
				 u32 align)
{
	/* over-allocate so there is always room to align the result */
	mem_desc->esas2r_param = mem_desc->size + align;
	mem_desc->virt_addr = NULL;
	mem_desc->phys_addr = 0;
	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)mem_desc->
						   esas2r_param,
						   (dma_addr_t *)&mem_desc->
						   phys_addr,
						   GFP_KERNEL);

	if (mem_desc->esas2r_data == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %lu bytes of consistent memory!",
			   (long
			    unsigned
			    int)mem_desc->esas2r_param);
		return false;
	}

	/*
	 * Round both addresses up; the alignment slack added above
	 * guarantees mem_desc->size bytes remain usable.
	 * NOTE(review): virt and phys are aligned independently - this
	 * assumes the coherent allocation keeps the same offset-to-alignment
	 * relationship for both views; confirm for the supported platforms.
	 */
	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
	memset(mem_desc->virt_addr, 0, mem_desc->size);
	return true;
}
/*
 * Free a buffer previously set up by esas2r_initmem_alloc().
 * No-op when nothing was ever allocated (virt_addr == NULL).
 */
static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful!  phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment.  That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
	 */
	if (mem_desc->phys_addr) {
		/* bytes the virtual address was advanced by for alignment */
		int unalign = ((u8 *)mem_desc->virt_addr) -
			      ((u8 *)mem_desc->esas2r_data);

		dma_free_coherent(&a->pcid->dev,
				  (size_t)mem_desc->esas2r_param,
				  mem_desc->esas2r_data,
				  (dma_addr_t)(mem_desc->phys_addr - unalign));
	} else {
		/*
		 * No DMA address recorded - presumably the memory came from
		 * a kmalloc-family allocation (the DMA path above always
		 * records phys_addr); verify against the callers.
		 */
		kfree(mem_desc->esas2r_data);
	}

	mem_desc->virt_addr = NULL;
}
  94. static bool alloc_vda_req(struct esas2r_adapter *a,
  95. struct esas2r_request *rq)
  96. {
  97. struct esas2r_mem_desc *memdesc = kzalloc(
  98. sizeof(struct esas2r_mem_desc), GFP_KERNEL);
  99. if (memdesc == NULL) {
  100. esas2r_hdebug("could not alloc mem for vda request memdesc\n");
  101. return false;
  102. }
  103. memdesc->size = sizeof(union atto_vda_req) +
  104. ESAS2R_DATA_BUF_LEN;
  105. if (!esas2r_initmem_alloc(a, memdesc, 256)) {
  106. esas2r_hdebug("could not alloc mem for vda request\n");
  107. kfree(memdesc);
  108. return false;
  109. }
  110. a->num_vrqs++;
  111. list_add(&memdesc->next_desc, &a->vrq_mds_head);
  112. rq->vrq_md = memdesc;
  113. rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
  114. rq->vrq->scsi.handle = a->num_vrqs;
  115. return true;
  116. }
  117. static void esas2r_unmap_regions(struct esas2r_adapter *a)
  118. {
  119. if (a->regs)
  120. iounmap((void __iomem *)a->regs);
  121. a->regs = NULL;
  122. pci_release_region(a->pcid, 2);
  123. if (a->data_window)
  124. iounmap((void __iomem *)a->data_window);
  125. a->data_window = NULL;
  126. pci_release_region(a->pcid, 0);
  127. }
  128. static int esas2r_map_regions(struct esas2r_adapter *a)
  129. {
  130. int error;
  131. a->regs = NULL;
  132. a->data_window = NULL;
  133. error = pci_request_region(a->pcid, 2, a->name);
  134. if (error != 0) {
  135. esas2r_log(ESAS2R_LOG_CRIT,
  136. "pci_request_region(2) failed, error %d",
  137. error);
  138. return error;
  139. }
  140. a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
  141. pci_resource_len(a->pcid, 2));
  142. if (a->regs == NULL) {
  143. esas2r_log(ESAS2R_LOG_CRIT,
  144. "ioremap failed for regs mem region\n");
  145. pci_release_region(a->pcid, 2);
  146. return -EFAULT;
  147. }
  148. error = pci_request_region(a->pcid, 0, a->name);
  149. if (error != 0) {
  150. esas2r_log(ESAS2R_LOG_CRIT,
  151. "pci_request_region(2) failed, error %d",
  152. error);
  153. esas2r_unmap_regions(a);
  154. return error;
  155. }
  156. a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
  157. 0),
  158. pci_resource_len(a->pcid, 0));
  159. if (a->data_window == NULL) {
  160. esas2r_log(ESAS2R_LOG_CRIT,
  161. "ioremap failed for data_window mem region\n");
  162. esas2r_unmap_regions(a);
  163. return -EFAULT;
  164. }
  165. return 0;
  166. }
  167. static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
  168. {
  169. int i;
  170. /* Set up interrupt mode based on the requested value */
  171. switch (intr_mode) {
  172. case INTR_MODE_LEGACY:
  173. use_legacy_interrupts:
  174. a->intr_mode = INTR_MODE_LEGACY;
  175. break;
  176. case INTR_MODE_MSI:
  177. i = pci_enable_msi(a->pcid);
  178. if (i != 0) {
  179. esas2r_log(ESAS2R_LOG_WARN,
  180. "failed to enable MSI for adapter %d, "
  181. "falling back to legacy interrupts "
  182. "(err=%d)", a->index,
  183. i);
  184. goto use_legacy_interrupts;
  185. }
  186. a->intr_mode = INTR_MODE_MSI;
  187. set_bit(AF2_MSI_ENABLED, &a->flags2);
  188. break;
  189. default:
  190. esas2r_log(ESAS2R_LOG_WARN,
  191. "unknown interrupt_mode %d requested, "
  192. "falling back to legacy interrupt",
  193. interrupt_mode);
  194. goto use_legacy_interrupts;
  195. }
  196. }
  197. static void esas2r_claim_interrupts(struct esas2r_adapter *a)
  198. {
  199. unsigned long flags = 0;
  200. if (a->intr_mode == INTR_MODE_LEGACY)
  201. flags |= IRQF_SHARED;
  202. esas2r_log(ESAS2R_LOG_INFO,
  203. "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
  204. a->pcid->irq, a, a->name, flags);
  205. if (request_irq(a->pcid->irq,
  206. (a->intr_mode ==
  207. INTR_MODE_LEGACY) ? esas2r_interrupt :
  208. esas2r_msi_interrupt,
  209. flags,
  210. a->name,
  211. a)) {
  212. esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
  213. a->pcid->irq);
  214. return;
  215. }
  216. set_bit(AF2_IRQ_CLAIMED, &a->flags2);
  217. esas2r_log(ESAS2R_LOG_INFO,
  218. "claimed IRQ %d flags: 0x%lx",
  219. a->pcid->irq, flags);
  220. }
/*
 * Allocate and initialize the adapter instance for PCI device @pcid at
 * slot @index and attach it to @host.  Returns 1 on success, 0 on
 * failure; on failure esas2r_kill_adapter() tears down whatever was
 * already set up.  Chip interrupts stay disabled until init completes.
 */
int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
			int index)
{
	struct esas2r_adapter *a;
	u64 bus_addr = 0;
	int i;
	void *next_uncached;
	struct esas2r_request *first_request, *last_request;
	bool dma64 = false;

	if (index >= MAX_ADAPTERS) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init invalid adapter index %u!",
			   index);
		return 0;
	}

	if (esas2r_adapters[index]) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init existing adapter index %u!",
			   index);
		return 0;
	}

	/* the adapter struct lives in the Scsi_Host's hostdata area */
	a = (struct esas2r_adapter *)host->hostdata;
	memset(a, 0, sizeof(struct esas2r_adapter));
	a->pcid = pcid;
	a->host = host;

	/* try 64-bit DMA first when the platform can actually need it */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
	    !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
		dma64 = true;

	if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
		esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
		esas2r_kill_adapter(index);
		return 0;
	}

	esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
		       "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");

	esas2r_adapters[index] = a;
	sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
	esas2r_debug("new adapter %p, name %s", a, a->name);

	spin_lock_init(&a->request_lock);
	spin_lock_init(&a->fw_event_lock);
	mutex_init(&a->fm_api_mutex);
	mutex_init(&a->fs_api_mutex);
	sema_init(&a->nvram_semaphore, 1);

	esas2r_fw_event_off(a);

	/*
	 * NOTE(review): a->index is still 0 here (assigned further down),
	 * so the workqueue is always named "esas2r/0" - confirm whether the
	 * local 'index' was intended.  The allocation result is also not
	 * checked for NULL before later use.
	 */
	a->fw_event_q =
		alloc_ordered_workqueue("esas2r/%d", WQ_MEM_RECLAIM, a->index);

	init_waitqueue_head(&a->buffered_ioctl_waiter);
	init_waitqueue_head(&a->nvram_waiter);
	init_waitqueue_head(&a->fm_api_waiter);
	init_waitqueue_head(&a->fs_api_waiter);
	init_waitqueue_head(&a->vda_waiter);

	INIT_LIST_HEAD(&a->general_req.req_list);
	INIT_LIST_HEAD(&a->active_list);
	INIT_LIST_HEAD(&a->defer_list);
	INIT_LIST_HEAD(&a->free_sg_list_head);
	INIT_LIST_HEAD(&a->avail_request);
	INIT_LIST_HEAD(&a->vrq_mds_head);
	INIT_LIST_HEAD(&a->fw_event_list);

	/* request structures are laid out immediately after the adapter */
	first_request = (struct esas2r_request *)((u8 *)(a + 1));

	/*
	 * NOTE(review): starting at i = 1 initializes num_requests - 1
	 * request structures here; presumably the remaining slot is covered
	 * by general_req - confirm against the hostdata sizing.
	 */
	for (last_request = first_request, i = 1; i < num_requests;
	     last_request++, i++) {
		INIT_LIST_HEAD(&last_request->req_list);
		list_add_tail(&last_request->comp_list, &a->avail_request);
		if (!alloc_vda_req(a, last_request)) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to allocate a VDA request!");
			esas2r_kill_adapter(index);
			return 0;
		}
	}

	esas2r_debug("requests: %p to %p (%d, %d)", first_request,
		     last_request,
		     sizeof(*first_request),
		     num_requests);

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
		esas2r_kill_adapter(index);
		return 0;
	}

	a->index = index;

	/* interrupts will be disabled until we are done with init */
	atomic_inc(&a->dis_ints_cnt);
	atomic_inc(&a->disable_cnt);
	set_bit(AF_CHPRST_PENDING, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_FIRST_INIT, &a->flags);
	set_bit(AF_LEGACY_SGE_MODE, &a->flags);

	a->init_msg = ESAS2R_INIT_MSG_START;
	a->max_vdareq_size = 128;
	a->build_sgl = esas2r_build_sg_list_sge;

	esas2r_setup_interrupts(a, interrupt_mode);

	/* one coherent chunk carved up by esas2r_init_adapter_struct() */
	a->uncached_size = esas2r_get_uncached_size(a);
	a->uncached = dma_alloc_coherent(&pcid->dev,
					 (size_t)a->uncached_size,
					 (dma_addr_t *)&bus_addr,
					 GFP_KERNEL);
	if (a->uncached == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %d bytes of consistent memory!",
			   a->uncached_size);
		esas2r_kill_adapter(index);
		return 0;
	}

	a->uncached_phys = bus_addr;

	esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
		     a->uncached_size,
		     a->uncached,
		     upper_32_bits(bus_addr),
		     lower_32_bits(bus_addr));
	memset(a->uncached, 0, a->uncached_size);
	next_uncached = a->uncached;

	if (!esas2r_init_adapter_struct(a,
					&next_uncached)) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to initialize adapter structure (2)!");
		esas2r_kill_adapter(index);
		return 0;
	}

	tasklet_init(&a->tasklet,
		     esas2r_adapter_tasklet,
		     (unsigned long)a);

	/*
	 * Disable chip interrupts to prevent spurious interrupts
	 * until we claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	esas2r_check_adapter(a);

	/* hardware init failure is logged but init still completes */
	if (!esas2r_init_adapter_hw(a, true)) {
		esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
	} else {
		esas2r_debug("esas2r_init_adapter ok");
	}

	esas2r_claim_interrupts(a);

	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
		esas2r_enable_chip_interrupts(a);

	set_bit(AF2_INIT_DONE, &a->flags2);
	if (!test_bit(AF_DEGRADED_MODE, &a->flags))
		esas2r_kickoff_timer(a);
	esas2r_debug("esas2r_init_adapter done for %p (%d)",
		     a, a->disable_cnt);

	return 1;
}
/*
 * Quiesce the adapter and release its per-adapter resources: halt the
 * chip, remove the sysfs binary files, free the IRQ and disable MSI,
 * free all DMA lists and request memory, and unmap the PCI regions.
 * When @power_management is non-zero the timer and tasklet are left
 * running so the adapter can be resumed later.
 */
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
				      int power_management)
{
	struct esas2r_mem_desc *memdesc, *next;

	/* only power down hardware that finished init and is not degraded */
	if ((test_bit(AF2_INIT_DONE, &a->flags2))
	    &&  (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
		if (!power_management) {
			del_timer_sync(&a->timer);
			tasklet_kill(&a->tasklet);
		}
		esas2r_power_down(a);

		/*
		 * There are versions of firmware that do not handle the sync
		 * cache command correctly.  Stall here to ensure that the
		 * cache is lazily flushed.
		 */
		mdelay(500);
		esas2r_debug("chip halted");
	}

	/* Remove sysfs binary files */
	if (a->sysfs_fw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
		a->sysfs_fw_created = 0;
	}

	if (a->sysfs_fs_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
		a->sysfs_fs_created = 0;
	}

	if (a->sysfs_vda_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
		a->sysfs_vda_created = 0;
	}

	if (a->sysfs_hw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
		a->sysfs_hw_created = 0;
	}

	if (a->sysfs_live_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_live_nvram);
		a->sysfs_live_nvram_created = 0;
	}

	if (a->sysfs_default_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_default_nvram);
		a->sysfs_default_nvram_created = 0;
	}

	/* Clean up interrupts */
	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "free_irq(%d) called", a->pcid->irq);

		free_irq(a->pcid->irq, a);
		esas2r_debug("IRQ released");
		clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
	}

	if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
		pci_disable_msi(a->pcid);
		clear_bit(AF2_MSI_ENABLED, &a->flags2);
		esas2r_debug("MSI disabled");
	}

	if (a->inbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->inbound_list_md);

	if (a->outbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->outbound_list_md);

	/* the descriptor structs themselves live in sg_list_mds (freed below) */
	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
				 next_desc) {
		esas2r_initmem_free(a, memdesc);
	}

	/* Following frees everything allocated via alloc_vda_req */
	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
		esas2r_initmem_free(a, memdesc);
		list_del(&memdesc->next_desc);
		kfree(memdesc);
	}

	kfree(a->first_ae_req);
	a->first_ae_req = NULL;

	kfree(a->sg_list_mds);
	a->sg_list_mds = NULL;

	kfree(a->req_table);
	a->req_table = NULL;

	if (a->regs) {
		esas2r_unmap_regions(a);
		a->regs = NULL;
		a->data_window = NULL;
		esas2r_debug("regions unmapped");
	}
}
/*
 * Release/free allocated resources for specified adapters.
 * Powers the adapter down, frees all DMA buffers and ioctl staging
 * memory, destroys the firmware-event workqueue, disables the PCI
 * device, and finally removes/puts the SCSI host if init had finished.
 * Safe to call for a slot that was never populated (a == NULL).
 */
void esas2r_kill_adapter(int i)
{
	struct esas2r_adapter *a = esas2r_adapters[i];

	if (a) {
		unsigned long flags;
		struct workqueue_struct *wq;
		esas2r_debug("killing adapter %p [%d] ", a, i);
		esas2r_fw_event_off(a);
		esas2r_adapter_power_down(a, 0);

		/* the shared buffered-ioctl area is owned by one adapter */
		if (esas2r_buffered_ioctl &&
		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);
			esas2r_buffered_ioctl = NULL;
		}

		if (a->vda_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)VDA_MAX_BUFFER_SIZE,
					  a->vda_buffer,
					  (dma_addr_t)a->ppvda_buffer);
			a->vda_buffer = NULL;
		}
		if (a->fs_api_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->fs_api_buffer_size,
					  a->fs_api_buffer,
					  (dma_addr_t)a->ppfs_api_buffer);
			a->fs_api_buffer = NULL;
		}

		kfree(a->local_atto_ioctl);
		a->local_atto_ioctl = NULL;

		/* detach the workqueue under the lock, destroy it outside */
		spin_lock_irqsave(&a->fw_event_lock, flags);
		wq = a->fw_event_q;
		a->fw_event_q = NULL;
		spin_unlock_irqrestore(&a->fw_event_lock, flags);
		if (wq)
			destroy_workqueue(wq);

		if (a->uncached) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->uncached_size,
					  a->uncached,
					  (dma_addr_t)a->uncached_phys);
			a->uncached = NULL;
			esas2r_debug("uncached area freed");
		}

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_disable_device() called.  msix_enabled: %d "
			       "msi_enabled: %d irq: %d pin: %d",
			       a->pcid->msix_enabled,
			       a->pcid->msi_enabled,
			       a->pcid->irq,
			       a->pcid->pin);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "before pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		pci_disable_device(a->pcid);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "after pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_set_drv_data(%p, NULL) called",
			       a->pcid);

		pci_set_drvdata(a->pcid, NULL);
		esas2r_adapters[i] = NULL;

		/* only tear down the SCSI host if init actually finished */
		if (test_bit(AF2_INIT_DONE, &a->flags2)) {
			clear_bit(AF2_INIT_DONE, &a->flags2);

			set_bit(AF_DEGRADED_MODE, &a->flags);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_remove_host() called");

			scsi_remove_host(a->host);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_host_put() called");

			scsi_host_put(a->host);
		}
	}
}
  536. static int __maybe_unused esas2r_suspend(struct device *dev)
  537. {
  538. struct Scsi_Host *host = dev_get_drvdata(dev);
  539. struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
  540. esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()");
  541. if (!a)
  542. return -ENODEV;
  543. esas2r_adapter_power_down(a, 1);
  544. esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0");
  545. return 0;
  546. }
/*
 * PM resume callback: re-map the PCI regions, re-establish the
 * previously selected interrupt mode, power the chip back up, and
 * re-claim the IRQ.  Returns 0 on success or a negative errno.
 * Note the success path intentionally falls through to error_exit
 * with rez == 0 so the final status is always logged.
 */
static int __maybe_unused esas2r_resume(struct device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(dev);
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
	int rez = 0;

	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()");

	if (!a) {
		rez = -ENODEV;
		goto error_exit;
	}

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
		rez = -ENOMEM;
		goto error_exit;
	}

	/* Set up interupt mode */
	esas2r_setup_interrupts(a, a->intr_mode);

	/*
	 * Disable chip interrupts to prevent spurious interrupts until we
	 * claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	if (!esas2r_power_up(a, true)) {
		esas2r_debug("yikes, esas2r_power_up failed");
		rez = -ENOMEM;
		goto error_exit;
	}

	esas2r_claim_interrupts(a);

	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
		/*
		 * Now that system interrupt(s) are claimed, we can enable
		 * chip interrupts.
		 */
		esas2r_enable_chip_interrupts(a);
		esas2r_kickoff_timer(a);
	} else {
		esas2r_debug("yikes, unable to claim IRQ");
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
		rez = -ENOMEM;
		goto error_exit;
	}

error_exit:
	esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d",
		       rez);
	return rez;
}
SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume);
  594. bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
  595. {
  596. set_bit(AF_DEGRADED_MODE, &a->flags);
  597. esas2r_log(ESAS2R_LOG_CRIT,
  598. "setting adapter to degraded mode: %s\n", error_str);
  599. return false;
  600. }
  601. u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
  602. {
  603. return sizeof(struct esas2r_sas_nvram)
  604. + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
  605. + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
  606. + 8
  607. + (num_sg_lists * (u16)sgl_page_size)
  608. + ALIGN((num_requests + num_ae_requests + 1 +
  609. ESAS2R_LIST_EXTRA) *
  610. sizeof(struct esas2r_inbound_list_source_entry),
  611. 8)
  612. + ALIGN((num_requests + num_ae_requests + 1 +
  613. ESAS2R_LIST_EXTRA) *
  614. sizeof(struct atto_vda_ob_rsp), 8)
  615. + 256; /* VDA request and buffer align */
  616. }
  617. static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
  618. {
  619. if (pci_is_pcie(a->pcid)) {
  620. u16 devcontrol;
  621. pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
  622. if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
  623. PCI_EXP_DEVCTL_READRQ_512B) {
  624. esas2r_log(ESAS2R_LOG_INFO,
  625. "max read request size > 512B");
  626. devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
  627. devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
  628. pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
  629. devcontrol);
  630. }
  631. }
  632. }
  633. /*
  634. * Determine the organization of the uncached data area and
  635. * finish initializing the adapter structure
  636. */
  637. bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
  638. void **uncached_area)
  639. {
  640. u32 i;
  641. u8 *high;
  642. struct esas2r_inbound_list_source_entry *element;
  643. struct esas2r_request *rq;
  644. struct esas2r_mem_desc *sgl;
  645. spin_lock_init(&a->sg_list_lock);
  646. spin_lock_init(&a->mem_lock);
  647. spin_lock_init(&a->queue_lock);
  648. a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
  649. if (!alloc_vda_req(a, &a->general_req)) {
  650. esas2r_hdebug(
  651. "failed to allocate a VDA request for the general req!");
  652. return false;
  653. }
  654. /* allocate requests for asynchronous events */
  655. a->first_ae_req =
  656. kcalloc(num_ae_requests, sizeof(struct esas2r_request),
  657. GFP_KERNEL);
  658. if (a->first_ae_req == NULL) {
  659. esas2r_log(ESAS2R_LOG_CRIT,
  660. "failed to allocate memory for asynchronous events");
  661. return false;
  662. }
  663. /* allocate the S/G list memory descriptors */
  664. a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc),
  665. GFP_KERNEL);
  666. if (a->sg_list_mds == NULL) {
  667. esas2r_log(ESAS2R_LOG_CRIT,
  668. "failed to allocate memory for s/g list descriptors");
  669. return false;
  670. }
  671. /* allocate the request table */
  672. a->req_table =
  673. kcalloc(num_requests + num_ae_requests + 1,
  674. sizeof(struct esas2r_request *),
  675. GFP_KERNEL);
  676. if (a->req_table == NULL) {
  677. esas2r_log(ESAS2R_LOG_CRIT,
  678. "failed to allocate memory for the request table");
  679. return false;
  680. }
  681. /* initialize PCI configuration space */
  682. esas2r_init_pci_cfg_space(a);
  683. /*
  684. * the thunder_stream boards all have a serial flash part that has a
  685. * different base address on the AHB bus.
  686. */
  687. if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
  688. && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
  689. a->flags2 |= AF2_THUNDERBOLT;
  690. if (test_bit(AF2_THUNDERBOLT, &a->flags2))
  691. a->flags2 |= AF2_SERIAL_FLASH;
  692. if (a->pcid->subsystem_device == ATTO_TLSH_1068)
  693. a->flags2 |= AF2_THUNDERLINK;
  694. /* Uncached Area */
  695. high = (u8 *)*uncached_area;
  696. /* initialize the scatter/gather table pages */
  697. for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
  698. sgl->size = sgl_page_size;
  699. list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
  700. if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
  701. /* Allow the driver to load if the minimum count met. */
  702. if (i < NUM_SGL_MIN)
  703. return false;
  704. break;
  705. }
  706. }
  707. /* compute the size of the lists */
  708. a->list_size = num_requests + ESAS2R_LIST_EXTRA;
  709. /* allocate the inbound list */
  710. a->inbound_list_md.size = a->list_size *
  711. sizeof(struct
  712. esas2r_inbound_list_source_entry);
  713. if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
  714. esas2r_hdebug("failed to allocate IB list");
  715. return false;
  716. }
  717. /* allocate the outbound list */
  718. a->outbound_list_md.size = a->list_size *
  719. sizeof(struct atto_vda_ob_rsp);
  720. if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
  721. ESAS2R_LIST_ALIGN)) {
  722. esas2r_hdebug("failed to allocate IB list");
  723. return false;
  724. }
  725. /* allocate the NVRAM structure */
  726. a->nvram = (struct esas2r_sas_nvram *)high;
  727. high += sizeof(struct esas2r_sas_nvram);
  728. /* allocate the discovery buffer */
  729. a->disc_buffer = high;
  730. high += ESAS2R_DISC_BUF_LEN;
  731. high = PTR_ALIGN(high, 8);
  732. /* allocate the outbound list copy pointer */
  733. a->outbound_copy = (u32 volatile *)high;
  734. high += sizeof(u32);
  735. if (!test_bit(AF_NVR_VALID, &a->flags))
  736. esas2r_nvram_set_defaults(a);
  737. /* update the caller's uncached memory area pointer */
  738. *uncached_area = (void *)high;
  739. /* initialize the allocated memory */
  740. if (test_bit(AF_FIRST_INIT, &a->flags)) {
  741. esas2r_targ_db_initialize(a);
  742. /* prime parts of the inbound list */
  743. element =
  744. (struct esas2r_inbound_list_source_entry *)a->
  745. inbound_list_md.
  746. virt_addr;
  747. for (i = 0; i < a->list_size; i++) {
  748. element->address = 0;
  749. element->reserved = 0;
  750. element->length = cpu_to_le32(HWILSE_INTERFACE_F0
  751. | (sizeof(union
  752. atto_vda_req)
  753. /
  754. sizeof(u32)));
  755. element++;
  756. }
  757. /* init the AE requests */
  758. for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
  759. i++) {
  760. INIT_LIST_HEAD(&rq->req_list);
  761. if (!alloc_vda_req(a, rq)) {
  762. esas2r_hdebug(
  763. "failed to allocate a VDA request!");
  764. return false;
  765. }
  766. esas2r_rq_init_request(rq, a);
  767. /* override the completion function */
  768. rq->comp_cb = esas2r_ae_complete;
  769. }
  770. }
  771. return true;
  772. }
/*
 * This code will verify that the chip is operational.
 *
 * Waits for the firmware to come ready, negotiates the firmware API
 * version, then programs and enables the inbound/outbound communication
 * lists.  On any failure the adapter is dropped into degraded mode via
 * esas2r_set_degraded_mode() and that function's result is returned
 * (presumably false — confirm in esas2r_set_degraded_mode).  Returns
 * true when the communication lists are up.
 */
bool esas2r_check_adapter(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;
	u64 ppaddr;
	u32 dw;

	/*
	 * if the chip reset detected flag is set, we can bypass a bunch of
	 * stuff.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags))
		goto skip_chip_reset;

	/*
	 * BEFORE WE DO ANYTHING, disable the chip interrupts!  the boot driver
	 * may have left them enabled or we may be recovering from a fault.
	 */
	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);

	/*
	 * wait for the firmware to become ready by forcing an interrupt and
	 * waiting for a response.
	 */
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		esas2r_force_interrupt(a);
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF) {
			/*
			 * Give the firmware up to two seconds to enable
			 * register access after a reset.
			 */
			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
				return esas2r_set_degraded_mode(a,
								"unable to access registers");
		} else if (doorbell & DRBL_FORCE_INT) {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/*
			 * This driver supports version 0 and version 1 of
			 * the API
			 */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);

			if (ver == DRBL_FW_VER_0) {
				/* legacy SGE mode: smaller VDA requests */
				set_bit(AF_LEGACY_SGE_MODE, &a->flags);
				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				/* PRD mode: larger VDA requests */
				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				return esas2r_set_degraded_mode(a,
								"unknown firmware version");
			}
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		/* allow up to three minutes for firmware start */
		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
			esas2r_hdebug("FW ready TMO");
			esas2r_bugon();

			return esas2r_set_degraded_mode(a,
							"firmware start has timed out");
		}
	}

	/* purge any asynchronous events since we will repost them later */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_DOWN) {
			/* ack the doorbell by writing the bits back */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(50));

		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			/* non-fatal: continue with list setup anyway */
			esas2r_hdebug("timeout waiting for interface down");
			break;
		}
	}
skip_chip_reset:
	/*
	 * first things first, before we go changing any of these registers
	 * disable the communication lists.
	 */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~MU_ILC_ENABLE;
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~MU_OLC_ENABLE;
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/* configure the communication list addresses */
	ppaddr = a->inbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	ppaddr = a->outbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	/* copy pointer lives in the uncached region; compute its bus address */
	ppaddr = a->uncached_phys +
		 ((u8 *)a->outbound_copy - a->uncached);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
				    upper_32_bits(ppaddr));

	/* reset the read and write pointers */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;
	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
				    MU_OLW_TOGGLE | a->last_write);

	/* configure the interface select fields */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
				    (dw | MU_OLIC_LIST_F0 |
				     MU_OLIC_SOURCE_DDR));

	/* finish configuring the communication lists */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/*
	 * notify the firmware that we're done setting up the communication
	 * list registers.  wait here until the firmware is done configuring
	 * its lists.  it will signal that it is done by enabling the lists.
	 */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_INIT) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug(
				"timeout waiting for communication list init");
			esas2r_bugon();
			return esas2r_set_degraded_mode(a,
							"timeout waiting for communication list init");
		}
	}

	/*
	 * flag whether the firmware supports the power down doorbell.  we
	 * determine this by reading the inbound doorbell enable mask.
	 */
	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
	if (doorbell & DRBL_POWER_DOWN)
		set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
	else
		clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);

	/*
	 * enable assertion of outbound queue and doorbell interrupts in the
	 * main interrupt cause register.
	 */
	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
	return true;
}
/*
 * Process the initialization message just completed and format the next one.
 *
 * This is a small state machine driven by a->init_msg; esas2r_init_msgs()
 * calls it repeatedly until it returns false.  Returns true when @rq has
 * been formatted with another message to send, false when the sequence is
 * complete (rq->req_stat is forced to RS_SUCCESS in that case).  Note the
 * deliberate fallthroughs: INIT falls into GET_INIT when the GET_INIT2
 * request is skipped, and GET_INIT falls into the terminating default.
 */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	u32 msg = a->init_msg;
	struct atto_vda_cfg_init *ci;

	/* consume the current state; each case sets the next one as needed */
	a->init_msg = 0;

	switch (msg) {
	case ESAS2R_INIT_MSG_START:
	case ESAS2R_INIT_MSG_REINIT:
	{
		esas2r_hdebug("CFG init");
		esas2r_build_cfg_req(a,
				     rq,
				     VDA_CFG_INIT,
				     0,
				     NULL);
		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
		/* firmware interface overflows in y2106 */
		ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
		rq->flags |= RF_FAILURE_OK;
		a->init_msg = ESAS2R_INIT_MSG_INIT;
		break;
	}

	case ESAS2R_INIT_MSG_INIT:
		if (rq->req_stat == RS_SUCCESS) {
			u32 major;
			u32 minor;
			u16 fw_release;

			/* fold the release's major/minor into fw_version */
			a->fw_version = le16_to_cpu(
				rq->func_rsp.cfg_rsp.vda_version);
			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
			fw_release = le16_to_cpu(
				rq->func_rsp.cfg_rsp.fw_release);
			major = LOBYTE(fw_release);
			minor = HIBYTE(fw_release);
			a->fw_version += (major << 16) + (minor << 24);
		} else {
			esas2r_hdebug("FAILED");
		}

		/*
		 * the 2.71 and earlier releases of R6xx firmware did not error
		 * unsupported config requests correctly.
		 */
		if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
		    || (be32_to_cpu(a->fw_version) > 0x00524702)) {
			esas2r_hdebug("CFG get init");
			esas2r_build_cfg_req(a,
					     rq,
					     VDA_CFG_GET_INIT2,
					     sizeof(struct atto_vda_cfg_init),
					     NULL);

			/* point the single PRD entry just past the VDA req */
			rq->vrq->cfg.sg_list_offset = offsetof(
				struct atto_vda_cfg_req,
				data.sge);
			rq->vrq->cfg.data.prde.ctl_len =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			rq->vrq->cfg.data.prde.address = cpu_to_le64(
				rq->vrq_md->phys_addr +
				sizeof(union atto_vda_req));
			rq->flags |= RF_FAILURE_OK;
			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
			break;
		}
		fallthrough;

	case ESAS2R_INIT_MSG_GET_INIT:
		/* guard: only parse a response if GET_INIT actually ran */
		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
			ci = (struct atto_vda_cfg_init *)rq->data_buf;
			if (rq->req_stat == RS_SUCCESS) {
				a->num_targets_backend =
					le32_to_cpu(ci->num_targets_backend);
				a->ioctl_tunnel =
					le32_to_cpu(ci->ioctl_tunnel);
			} else {
				esas2r_hdebug("FAILED");
			}
		}
		fallthrough;

	default:
		/* sequence complete; nothing more to send */
		rq->req_stat = RS_SUCCESS;
		return false;
	}
	return true;
}
  1040. /*
  1041. * Perform initialization messages via the request queue. Messages are
  1042. * performed with interrupts disabled.
  1043. */
  1044. bool esas2r_init_msgs(struct esas2r_adapter *a)
  1045. {
  1046. bool success = true;
  1047. struct esas2r_request *rq = &a->general_req;
  1048. esas2r_rq_init_request(rq, a);
  1049. rq->comp_cb = esas2r_dummy_complete;
  1050. if (a->init_msg == 0)
  1051. a->init_msg = ESAS2R_INIT_MSG_REINIT;
  1052. while (a->init_msg) {
  1053. if (esas2r_format_init_msg(a, rq)) {
  1054. unsigned long flags;
  1055. while (true) {
  1056. spin_lock_irqsave(&a->queue_lock, flags);
  1057. esas2r_start_vda_request(a, rq);
  1058. spin_unlock_irqrestore(&a->queue_lock, flags);
  1059. esas2r_wait_request(a, rq);
  1060. if (rq->req_stat != RS_PENDING)
  1061. break;
  1062. }
  1063. }
  1064. if (rq->req_stat == RS_SUCCESS
  1065. || ((rq->flags & RF_FAILURE_OK)
  1066. && rq->req_stat != RS_TIMEOUT))
  1067. continue;
  1068. esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
  1069. a->init_msg, rq->req_stat, rq->flags);
  1070. a->init_msg = ESAS2R_INIT_MSG_START;
  1071. success = false;
  1072. break;
  1073. }
  1074. esas2r_rq_destroy_request(rq, a);
  1075. return success;
  1076. }
/*
 * Initialize the adapter chip.
 *
 * Runs the initialization message sequence, posts the async event
 * requests, and kicks off (or polls) device discovery.  When @init_poll
 * is true, discovery is driven synchronously here with simulated timer
 * ticks; otherwise it proceeds interrupt driven.  Returns true on
 * success.  The exit path always maintains the reset/first-init flag
 * bookkeeping, even on failure.
 */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		goto exit;

	if (!test_bit(AF_NVR_VALID, &a->flags)) {
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	clear_bit(AF_DEGRADED_MODE, &a->flags);
	clear_bit(AF_CHPRST_PENDING, &a->flags);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	/*
	 * A chip reset during first init means the original polled discovery
	 * is still in progress; just re-enable interrupts and return.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags)
	    && (test_bit(AF_FIRST_INIT, &a->flags))) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested.  this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state.  deferred device waiting will use
	 * interrupts.  chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		set_bit(AF_TASKLET_SCHEDULED, &a->flags);
		set_bit(AF_DISC_POLLED, &a->flags);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_dec(&a->disable_cnt);

		while (test_bit(AF_DISC_PENDING, &a->flags)) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!test_bit(AF_CHPRST_PENDING, &a->flags))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {
				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);
		}

		/* restore the disable count and tasklet gating from above */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_inc(&a->disable_cnt);

		clear_bit(AF_DISC_POLLED, &a->flags);
		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
	}

	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
	    test_bit(AF_FIRST_INIT, &a->flags)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			clear_bit(AF_CHPRST_PENDING, &a->flags);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			clear_bit(AF_CHPRST_PENDING, &a->flags);
			clear_bit(AF_DISC_PENDING, &a->flags);
		}

		/* Enable deferred processing after the first initialization. */
		if (test_bit(AF_FIRST_INIT, &a->flags)) {
			clear_bit(AF_FIRST_INIT, &a->flags);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}
/*
 * Request a full adapter reset: mark the reset as OS-initiated, perform
 * the local reset, then schedule the tasklet to run the deferred
 * recovery work.  The ordering matters — the flag must be set before
 * the reset so the recovery path sees it.
 */
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	set_bit(AF_OS_RESET, &a->flags);
	esas2r_local_reset_adapter(a);
	esas2r_schedule_tasklet(a);
}
  1227. void esas2r_reset_chip(struct esas2r_adapter *a)
  1228. {
  1229. if (!esas2r_is_adapter_present(a))
  1230. return;
  1231. /*
  1232. * Before we reset the chip, save off the VDA core dump. The VDA core
  1233. * dump is located in the upper 512KB of the onchip SRAM. Make sure
  1234. * to not overwrite a previous crash that was saved.
  1235. */
  1236. if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
  1237. !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
  1238. esas2r_read_mem_block(a,
  1239. a->fw_coredump_buff,
  1240. MW_DATA_ADDR_SRAM + 0x80000,
  1241. ESAS2R_FWCOREDUMP_SZ);
  1242. set_bit(AF2_COREDUMP_SAVED, &a->flags2);
  1243. }
  1244. clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
  1245. /* Reset the chip */
  1246. if (a->pcid->revision == MVR_FREY_B2)
  1247. esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
  1248. MU_CTL_IN_FULL_RST2);
  1249. else
  1250. esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
  1251. MU_CTL_IN_FULL_RST);
  1252. /* Stall a little while to let the reset condition clear */
  1253. mdelay(10);
  1254. }
  1255. static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
  1256. {
  1257. u32 starttime;
  1258. u32 doorbell;
  1259. esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
  1260. starttime = jiffies_to_msecs(jiffies);
  1261. while (true) {
  1262. doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
  1263. if (doorbell & DRBL_POWER_DOWN) {
  1264. esas2r_write_register_dword(a, MU_DOORBELL_OUT,
  1265. doorbell);
  1266. break;
  1267. }
  1268. schedule_timeout_interruptible(msecs_to_jiffies(100));
  1269. if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
  1270. esas2r_hdebug("Timeout waiting for power down");
  1271. break;
  1272. }
  1273. }
  1274. }
/*
 * Perform power management processing including managing device states, adapter
 * states, interrupts, and I/O.
 *
 * Quiesces the firmware interface (unless already degraded), notifies
 * firmware of power down when supported, then suspends I/O and removes
 * all target devices.  The doorbell handshake ordering here must not be
 * rearranged.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	set_bit(AF_POWER_MGT, &a->flags);
	set_bit(AF_POWER_DOWN, &a->flags);

	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing later.
		 * increment the disable count to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				/* ack the doorbell by writing it back */
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				/* non-fatal: proceed with power down anyway */
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it tell them the driver
		 * is powering down.
		 */
		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	set_bit(AF_OS_RESET, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_CHPRST_PENDING, &a->flags);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
  1329. /*
  1330. * Perform power management processing including managing device states, adapter
  1331. * states, interrupts, and I/O.
  1332. */
  1333. bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
  1334. {
  1335. bool ret;
  1336. clear_bit(AF_POWER_DOWN, &a->flags);
  1337. esas2r_init_pci_cfg_space(a);
  1338. set_bit(AF_FIRST_INIT, &a->flags);
  1339. atomic_inc(&a->disable_cnt);
  1340. /* reinitialize the adapter */
  1341. ret = esas2r_check_adapter(a);
  1342. if (!esas2r_init_adapter_hw(a, init_poll))
  1343. ret = false;
  1344. /* send the reset asynchronous event */
  1345. esas2r_send_reset_ae(a, true);
  1346. /* clear this flag after initialization. */
  1347. clear_bit(AF_POWER_MGT, &a->flags);
  1348. return ret;
  1349. }
  1350. bool esas2r_is_adapter_present(struct esas2r_adapter *a)
  1351. {
  1352. if (test_bit(AF_NOT_PRESENT, &a->flags))
  1353. return false;
  1354. if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
  1355. set_bit(AF_NOT_PRESENT, &a->flags);
  1356. return false;
  1357. }
  1358. return true;
  1359. }
  1360. const char *esas2r_get_model_name(struct esas2r_adapter *a)
  1361. {
  1362. switch (a->pcid->subsystem_device) {
  1363. case ATTO_ESAS_R680:
  1364. return "ATTO ExpressSAS R680";
  1365. case ATTO_ESAS_R608:
  1366. return "ATTO ExpressSAS R608";
  1367. case ATTO_ESAS_R60F:
  1368. return "ATTO ExpressSAS R60F";
  1369. case ATTO_ESAS_R6F0:
  1370. return "ATTO ExpressSAS R6F0";
  1371. case ATTO_ESAS_R644:
  1372. return "ATTO ExpressSAS R644";
  1373. case ATTO_ESAS_R648:
  1374. return "ATTO ExpressSAS R648";
  1375. case ATTO_TSSC_3808:
  1376. return "ATTO ThunderStream SC 3808D";
  1377. case ATTO_TSSC_3808E:
  1378. return "ATTO ThunderStream SC 3808E";
  1379. case ATTO_TLSH_1068:
  1380. return "ATTO ThunderLink SH 1068";
  1381. }
  1382. return "ATTO SAS Controller";
  1383. }
  1384. const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
  1385. {
  1386. switch (a->pcid->subsystem_device) {
  1387. case ATTO_ESAS_R680:
  1388. return "R680";
  1389. case ATTO_ESAS_R608:
  1390. return "R608";
  1391. case ATTO_ESAS_R60F:
  1392. return "R60F";
  1393. case ATTO_ESAS_R6F0:
  1394. return "R6F0";
  1395. case ATTO_ESAS_R644:
  1396. return "R644";
  1397. case ATTO_ESAS_R648:
  1398. return "R648";
  1399. case ATTO_TSSC_3808:
  1400. return "SC 3808D";
  1401. case ATTO_TSSC_3808E:
  1402. return "SC 3808E";
  1403. case ATTO_TLSH_1068:
  1404. return "SH 1068";
  1405. }
  1406. return "unknown";
  1407. }