sas_scsi_host.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Serial Attached SCSI (SAS) class SCSI Host glue.
  4. *
  5. * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
  6. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  7. */
  8. #include <linux/kthread.h>
  9. #include <linux/firmware.h>
  10. #include <linux/export.h>
  11. #include <linux/ctype.h>
  12. #include <linux/kernel.h>
  13. #include "sas_internal.h"
  14. #include <scsi/scsi_host.h>
  15. #include <scsi/scsi_device.h>
  16. #include <scsi/scsi_tcq.h>
  17. #include <scsi/scsi.h>
  18. #include <scsi/scsi_eh.h>
  19. #include <scsi/scsi_transport.h>
  20. #include <scsi/scsi_transport_sas.h>
  21. #include <scsi/sas_ata.h>
  22. #include "scsi_sas_internal.h"
  23. #include "scsi_transport_api.h"
  24. #include "scsi_priv.h"
  25. #include <linux/err.h>
  26. #include <linux/blkdev.h>
  27. #include <linux/freezer.h>
  28. #include <linux/gfp.h>
  29. #include <linux/scatterlist.h>
  30. #include <linux/libata.h>
/*
 * sas_end_task - record the final status of a completed sas_task and free it
 * @sc:   SCSI command the task was executing
 * @task: the finished sas_task (freed before return)
 *
 * Translates the libsas transport response/status pair into a SCSI
 * host-byte (hs) and status-byte (stat), stores the combined result in
 * sc->result, detaches the task from the command and frees it.
 */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
    struct task_status_struct *ts = &task->task_status;
    enum scsi_host_status hs = DID_OK;
    enum exec_status stat = SAS_SAM_STAT_GOOD;

    if (ts->resp == SAS_TASK_UNDELIVERED) {
        /* transport error */
        hs = DID_NO_CONNECT;
    } else { /* ts->resp == SAS_TASK_COMPLETE */
        /* task delivered, what happened afterwards? */
        switch (ts->stat) {
        case SAS_DEV_NO_RESPONSE:
        case SAS_INTERRUPTED:
        case SAS_PHY_DOWN:
        case SAS_NAK_R_ERR:
        case SAS_OPEN_TO:
            hs = DID_NO_CONNECT;
            break;
        case SAS_DATA_UNDERRUN:
            scsi_set_resid(sc, ts->residual);
            /* short transfer is only an error when it undercuts
             * the command's declared underflow threshold */
            if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
                hs = DID_ERROR;
            break;
        case SAS_DATA_OVERRUN:
            hs = DID_ERROR;
            break;
        case SAS_QUEUE_FULL:
            hs = DID_SOFT_ERROR; /* retry */
            break;
        case SAS_DEVICE_UNKNOWN:
            hs = DID_BAD_TARGET;
            break;
        case SAS_OPEN_REJECT:
            if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
                hs = DID_SOFT_ERROR; /* retry */
            else
                hs = DID_ERROR;
            break;
        case SAS_PROTO_RESPONSE:
            /* not valid for SSP; flag buggy LLDDs loudly */
            pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
                      task->dev->port->ha->sas_ha_name);
            break;
        case SAS_ABORTED_TASK:
            hs = DID_ABORT;
            break;
        case SAS_SAM_STAT_CHECK_CONDITION:
            /* copy (possibly truncated) sense data for the midlayer */
            memcpy(sc->sense_buffer, ts->buf,
                   min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
            stat = SAS_SAM_STAT_CHECK_CONDITION;
            break;
        default:
            /* pass any other SAM status straight through */
            stat = ts->stat;
            break;
        }
    }

    sc->result = (hs << 16) | stat;
    ASSIGN_SAS_TASK(sc, NULL);
    sas_free_task(task);
}
/*
 * sas_scsi_task_done - LLDD completion callback for an SSP sas_task
 * @task: the completed task
 *
 * Races with the error handler: under dev->done_lock, if SAS_HA_FROZEN
 * is set the EH owns the task, so we drop it here and let EH complete
 * it; otherwise we detach the task from the command and complete the
 * command ourselves.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
    struct scsi_cmnd *sc = task->uldd_task;
    struct domain_device *dev = task->dev;
    struct sas_ha_struct *ha = dev->port->ha;
    unsigned long flags;

    spin_lock_irqsave(&dev->done_lock, flags);
    if (test_bit(SAS_HA_FROZEN, &ha->state))
        task = NULL;    /* EH has frozen completions; it owns this task now */
    else
        ASSIGN_SAS_TASK(sc, NULL);
    spin_unlock_irqrestore(&dev->done_lock, flags);

    if (unlikely(!task)) {
        /* task will be completed by the error handler */
        pr_debug("task done but aborted\n");
        return;
    }

    if (unlikely(!sc)) {
        /* internal task with no SCSI command attached; just free it */
        pr_debug("task_done called with non existing SCSI cmnd!\n");
        sas_free_task(task);
        return;
    }

    sas_end_task(sc, task);
    scsi_done(sc);
}
/*
 * sas_create_task - build an SSP sas_task for a SCSI command
 * @cmd:       midlayer command to wrap
 * @dev:       target domain device
 * @gfp_flags: allocation context flags
 *
 * Returns the new task (also linked to @cmd via ASSIGN_SAS_TASK), or
 * NULL if allocation failed.
 */
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
                                        struct domain_device *dev,
                                        gfp_t gfp_flags)
{
    struct sas_task *task = sas_alloc_task(gfp_flags);
    struct scsi_lun lun;

    if (!task)
        return NULL;

    /* cross-link command and task so either side can find the other */
    task->uldd_task = cmd;
    ASSIGN_SAS_TASK(cmd, task);

    task->dev = dev;
    task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

    int_to_scsilun(cmd->device->lun, &lun);
    memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
    task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
    task->ssp_task.cmd = cmd;

    /* describe the data phase from the midlayer's scatterlist */
    task->scatter = scsi_sglist(cmd);
    task->num_scatter = scsi_sg_count(cmd);
    task->total_xfer_len = scsi_bufflen(cmd);
    task->data_dir = cmd->sc_data_direction;

    task->task_done = sas_scsi_task_done;

    return task;
}
/*
 * sas_queuecommand - libsas ->queuecommand implementation
 * @host: SCSI host
 * @cmd:  command to issue
 *
 * SATA commands are handed to libata under the port lock; SSP commands
 * get a sas_task and are passed to the LLDD.  Returns 0 (completing the
 * command with an error result where needed) or SCSI_MLQUEUE_HOST_BUSY
 * when task allocation fails so the midlayer retries.
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
    struct sas_internal *i = to_sas_internal(host->transportt);
    struct domain_device *dev = cmd_to_domain_dev(cmd);
    struct sas_task *task;
    int res = 0;

    /* If the device fell off, no sense in issuing commands */
    if (test_bit(SAS_DEV_GONE, &dev->state)) {
        cmd->result = DID_BAD_TARGET << 16;
        goto out_done;
    }

    if (dev_is_sata(dev)) {
        /* libata expects to be called with the ata port lock held */
        spin_lock_irq(dev->sata_dev.ap->lock);
        res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
        spin_unlock_irq(dev->sata_dev.ap->lock);
        return res;
    }

    task = sas_create_task(cmd, dev, GFP_ATOMIC);
    if (!task)
        return SCSI_MLQUEUE_HOST_BUSY;

    res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
    if (res)
        goto out_free_task;
    return 0;

out_free_task:
    pr_debug("lldd_execute_task returned: %d\n", res);
    /* unwind the command<->task linkage before freeing */
    ASSIGN_SAS_TASK(cmd, NULL);
    sas_free_task(task);
    if (res == -SAS_QUEUE_FULL)
        cmd->result = DID_SOFT_ERROR << 16; /* retry */
    else
        cmd->result = DID_ERROR << 16;
out_done:
    scsi_done(cmd);
    return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
/*
 * sas_eh_finish_cmd - retire a command from the error handler
 * @cmd: aborted command
 *
 * Records the task's final status and frees it, then either defers the
 * command to the ata EH queue (SATA) or moves it to the host's EH done
 * list (SSP).
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
    struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
    struct domain_device *dev = cmd_to_domain_dev(cmd);
    struct sas_task *task = TO_SAS_TASK(cmd);

    /* At this point, we only get called following an actual abort
     * of the task, so we should be guaranteed not to be racing with
     * any completions from the LLD. Task is freed after this.
     */
    sas_end_task(cmd, task);

    if (dev_is_sata(dev)) {
        /* defer commands to libata so that libata EH can
         * handle ata qcs correctly
         */
        list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
        return;
    }

    /* now finish the command and move it on to the error
     * handler done list, this also takes it off the
     * error handler pending list.
     */
    scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
  199. static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
  200. {
  201. struct scsi_cmnd *cmd, *n;
  202. list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
  203. if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
  204. cmd->device->lun == my_cmd->device->lun)
  205. sas_eh_finish_cmd(cmd);
  206. }
  207. }
  208. static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
  209. struct domain_device *dev)
  210. {
  211. struct scsi_cmnd *cmd, *n;
  212. list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
  213. struct domain_device *x = cmd_to_domain_dev(cmd);
  214. if (x == dev)
  215. sas_eh_finish_cmd(cmd);
  216. }
  217. }
  218. static void sas_scsi_clear_queue_port(struct list_head *error_q,
  219. struct asd_sas_port *port)
  220. {
  221. struct scsi_cmnd *cmd, *n;
  222. list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
  223. struct domain_device *dev = cmd_to_domain_dev(cmd);
  224. struct asd_sas_port *x = dev->port;
  225. if (x == port)
  226. sas_eh_finish_cmd(cmd);
  227. }
  228. }
/* Outcome of trying to abort/locate a task during error recovery. */
enum task_disposition {
    TASK_IS_DONE,        /* task completed while we were aborting it */
    TASK_IS_ABORTED,     /* LLDD abort succeeded */
    TASK_IS_AT_LU,       /* still queued at the LU; try LU recovery */
    TASK_IS_NOT_AT_LU,   /* not at the LU; escalate to I_T recovery */
    TASK_ABORT_FAILED,   /* abort and query both failed; escalate */
};
/*
 * sas_scsi_find_task - abort a task and classify where it ended up
 * @task: the task under error recovery
 *
 * Tries up to 5 times: issue lldd_abort_task, then (if the abort did
 * not complete) use lldd_query_task to learn whether the task is still
 * at the LU.  Returns a task_disposition telling the caller how far
 * recovery must escalate.
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
    unsigned long flags;
    int i, res;
    struct sas_internal *si =
        to_sas_internal(task->dev->port->ha->shost->transportt);

    for (i = 0; i < 5; i++) {
        pr_notice("%s: aborting task 0x%p\n", __func__, task);
        res = si->dft->lldd_abort_task(task);

        /* the task may have completed while the abort was in flight */
        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
            spin_unlock_irqrestore(&task->task_state_lock, flags);
            pr_debug("%s: task 0x%p is done\n", __func__, task);
            return TASK_IS_DONE;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        if (res == TMF_RESP_FUNC_COMPLETE) {
            pr_notice("%s: task 0x%p is aborted\n",
                      __func__, task);
            return TASK_IS_ABORTED;
        } else if (si->dft->lldd_query_task) {
            pr_notice("%s: querying task 0x%p\n", __func__, task);
            res = si->dft->lldd_query_task(task);
            switch (res) {
            case TMF_RESP_FUNC_SUCC:
                pr_notice("%s: task 0x%p at LU\n", __func__,
                          task);
                return TASK_IS_AT_LU;
            case TMF_RESP_FUNC_COMPLETE:
                pr_notice("%s: task 0x%p not at LU\n",
                          __func__, task);
                return TASK_IS_NOT_AT_LU;
            case TMF_RESP_FUNC_FAILED:
                pr_notice("%s: task 0x%p failed to abort\n",
                          __func__, task);
                return TASK_ABORT_FAILED;
            default:
                /* unrecognized response: loop and retry the abort */
                pr_notice("%s: task 0x%p result code %d not handled\n",
                          __func__, task, res);
            }
        }
    }
    return TASK_ABORT_FAILED;
}
/*
 * sas_recover_lu - attempt LU-level recovery for a stuck command
 * @dev: domain device holding the task
 * @cmd: the command whose LUN we recover
 *
 * Escalates through the optional LLDD TMFs in order: abort task set,
 * clear task set, LU reset — stopping at the first that succeeds.
 * Returns the TMF response code (TMF_RESP_FUNC_FAILED if nothing
 * worked or no handlers are provided).
 */
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
    int res = TMF_RESP_FUNC_FAILED;
    struct scsi_lun lun;
    struct sas_internal *i =
        to_sas_internal(dev->port->ha->shost->transportt);

    int_to_scsilun(cmd->device->lun, &lun);

    pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
              SAS_ADDR(dev->sas_addr),
              cmd->device->lun);

    if (i->dft->lldd_abort_task_set)
        res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);

    if (res == TMF_RESP_FUNC_FAILED) {
        if (i->dft->lldd_clear_task_set)
            res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
    }

    if (res == TMF_RESP_FUNC_FAILED) {
        if (i->dft->lldd_lu_reset)
            res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
    }

    return res;
}
  302. static int sas_recover_I_T(struct domain_device *dev)
  303. {
  304. int res = TMF_RESP_FUNC_FAILED;
  305. struct sas_internal *i =
  306. to_sas_internal(dev->port->ha->shost->transportt);
  307. pr_notice("I_T nexus reset for dev %016llx\n",
  308. SAS_ADDR(dev->sas_addr));
  309. if (i->dft->lldd_I_T_nexus_reset)
  310. res = i->dft->lldd_I_T_nexus_reset(dev);
  311. return res;
  312. }
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
    struct sas_ha_struct *ha = dev->port->ha;
    struct sas_phy *phy;
    unsigned long flags;

    /* a published domain device always has a valid phy, it may be
     * stale, but it is never NULL
     */
    BUG_ON(!dev->phy);

    /* phy_port_lock keeps dev->phy stable while we take the reference */
    spin_lock_irqsave(&ha->phy_port_lock, flags);
    phy = dev->phy;
    get_device(&phy->dev);
    spin_unlock_irqrestore(&ha->phy_port_lock, flags);

    /* caller is responsible for dropping the device reference */
    return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
/*
 * sas_queue_reset - schedule a reset to be run from the EH thread
 * @dev:        target domain device
 * @reset_type: SAS_DEV_LU_RESET or SAS_DEV_RESET
 * @lun:        LUN for LU resets (ignored for device resets)
 *
 * Queues @dev on the ha's eh_dev_q and kicks scsi_schedule_eh; used
 * when the caller is not the EH thread itself.  Returns SUCCESS once
 * scheduled, FAILED if a reset of this type is already pending after
 * 100 attempts.
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
    struct sas_ha_struct *ha = dev->port->ha;
    int scheduled = 0, tries = 100;

    /* ata: promote lun reset to bus reset */
    if (dev_is_sata(dev)) {
        sas_ata_schedule_reset(dev);
        return SUCCESS;
    }

    while (!scheduled && tries--) {
        spin_lock_irq(&ha->lock);
        /* only queue if no EH work and no same-type reset is pending */
        if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
            !test_bit(reset_type, &dev->state)) {
            scheduled = 1;
            ha->eh_active++;
            list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
            set_bit(SAS_DEV_EH_PENDING, &dev->state);
            set_bit(reset_type, &dev->state);
            int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
            scsi_schedule_eh(ha->shost);
        }
        spin_unlock_irq(&ha->lock);

        if (scheduled)
            return SUCCESS;
    }

    pr_warn("%s reset of %s failed\n",
            reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
            dev_name(&dev->rphy->dev));

    return FAILED;
}
/*
 * sas_eh_abort_handler - midlayer ->eh_abort_handler for libsas hosts
 * @cmd: command to abort
 *
 * Asks the LLDD to abort the command's sas_task.  SATA aborts are only
 * allowed once host EH is scheduled (libata must drive qc teardown).
 * Returns SUCCESS on a completed/successful TMF, FAILED otherwise.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
    int res = TMF_RESP_FUNC_FAILED;
    struct sas_task *task = TO_SAS_TASK(cmd);
    struct Scsi_Host *host = cmd->device->host;
    struct domain_device *dev = cmd_to_domain_dev(cmd);
    struct sas_internal *i = to_sas_internal(host->transportt);
    unsigned long flags;

    if (!i->dft->lldd_abort_task)
        return FAILED;

    spin_lock_irqsave(host->host_lock, flags);
    /* We cannot do async aborts for SATA devices */
    if (dev_is_sata(dev) && !host->host_eh_scheduled) {
        spin_unlock_irqrestore(host->host_lock, flags);
        return FAILED;
    }
    spin_unlock_irqrestore(host->host_lock, flags);

    if (task)
        res = i->dft->lldd_abort_task(task);
    else
        pr_notice("no task to abort\n");
    if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
        return SUCCESS;

    return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
    int res;
    struct scsi_lun lun;
    struct Scsi_Host *host = cmd->device->host;
    struct domain_device *dev = cmd_to_domain_dev(cmd);
    struct sas_internal *i = to_sas_internal(host->transportt);

    /* outside the EH thread we may only queue the reset, not run it */
    if (current != host->ehandler)
        return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);

    int_to_scsilun(cmd->device->lun, &lun);

    if (!i->dft->lldd_lu_reset)
        return FAILED;

    res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
    if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
        return SUCCESS;

    return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
/*
 * sas_eh_target_reset_handler - midlayer target reset via I_T nexus reset
 * @cmd: command identifying the target
 *
 * Queues the reset when called outside the EH thread; otherwise invokes
 * the LLDD's I_T nexus reset directly.  -ENODEV counts as SUCCESS since
 * a vanished device needs no further recovery.
 */
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
    int res;
    struct Scsi_Host *host = cmd->device->host;
    struct domain_device *dev = cmd_to_domain_dev(cmd);
    struct sas_internal *i = to_sas_internal(host->transportt);

    if (current != host->ehandler)
        return sas_queue_reset(dev, SAS_DEV_RESET, 0);

    if (!i->dft->lldd_I_T_nexus_reset)
        return FAILED;

    res = i->dft->lldd_I_T_nexus_reset(dev);
    if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
        res == -ENODEV)
        return SUCCESS;

    return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
  422. /* Try to reset a device */
  423. static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
  424. {
  425. int res;
  426. struct Scsi_Host *shost = cmd->device->host;
  427. if (!shost->hostt->eh_device_reset_handler)
  428. goto try_target_reset;
  429. res = shost->hostt->eh_device_reset_handler(cmd);
  430. if (res == SUCCESS)
  431. return res;
  432. try_target_reset:
  433. if (shost->hostt->eh_target_reset_handler)
  434. return shost->hostt->eh_target_reset_handler(cmd);
  435. return FAILED;
  436. }
/*
 * sas_eh_handle_sas_errors - core libsas error-recovery state machine
 * @shost:  host under recovery
 * @work_q: commands handed over by the midlayer EH
 *
 * For each command still holding a sas_task: try to abort it, then
 * escalate LU reset -> I_T reset -> device reset -> clear nexus
 * (port, then whole HA).  Each successful escalation finishes every
 * other queued command it covers and restarts the scan ("Again").
 * Commands whose tasks already completed, plus deferred ata commands,
 * are spliced back onto @work_q on exit.
 */
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
    struct scsi_cmnd *cmd, *n;
    enum task_disposition res = TASK_IS_DONE;
    int tmf_resp, need_reset;
    struct sas_internal *i = to_sas_internal(shost->transportt);
    unsigned long flags;
    struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    LIST_HEAD(done);

    /* clean out any commands that won the completion vs eh race */
    list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
        struct domain_device *dev = cmd_to_domain_dev(cmd);
        struct sas_task *task;

        spin_lock_irqsave(&dev->done_lock, flags);
        /* by this point the lldd has either observed
         * SAS_HA_FROZEN and is leaving the task alone, or has
         * won the race with eh and decided to complete it
         */
        task = TO_SAS_TASK(cmd);
        spin_unlock_irqrestore(&dev->done_lock, flags);

        if (!task)
            list_move_tail(&cmd->eh_entry, &done);
    }

 Again:
    list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
        struct sas_task *task = TO_SAS_TASK(cmd);

        list_del_init(&cmd->eh_entry);

        spin_lock_irqsave(&task->task_state_lock, flags);
        need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        if (need_reset) {
            /* LLDD asked for a reset; skip the abort attempt */
            pr_notice("%s: task 0x%p requests reset\n",
                      __func__, task);
            goto reset;
        }

        pr_debug("trying to find task 0x%p\n", task);
        res = sas_scsi_find_task(task);

        switch (res) {
        case TASK_IS_DONE:
            pr_notice("%s: task 0x%p is done\n", __func__,
                      task);
            sas_eh_finish_cmd(cmd);
            continue;
        case TASK_IS_ABORTED:
            pr_notice("%s: task 0x%p is aborted\n",
                      __func__, task);
            sas_eh_finish_cmd(cmd);
            continue;
        case TASK_IS_AT_LU:
            pr_info("task 0x%p is at LU: lu recover\n", task);
 reset:
            tmf_resp = sas_recover_lu(task->dev, cmd);
            if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
                pr_notice("dev %016llx LU 0x%llx is recovered\n",
                          SAS_ADDR(task->dev),
                          cmd->device->lun);
                sas_eh_finish_cmd(cmd);
                /* LU reset covers every command on this LUN */
                sas_scsi_clear_queue_lu(work_q, cmd);
                goto Again;
            }
            fallthrough;
        case TASK_IS_NOT_AT_LU:
        case TASK_ABORT_FAILED:
            pr_notice("task 0x%p is not at LU: I_T recover\n",
                      task);
            tmf_resp = sas_recover_I_T(task->dev);
            if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
                tmf_resp == -ENODEV) {
                struct domain_device *dev = task->dev;
                pr_notice("I_T %016llx recovered\n",
                          SAS_ADDR(task->dev->sas_addr));
                sas_eh_finish_cmd(cmd);
                /* I_T reset covers every command on this device */
                sas_scsi_clear_queue_I_T(work_q, dev);
                goto Again;
            }
            /* Hammer time :-) */
            try_to_reset_cmd_device(cmd);
            if (i->dft->lldd_clear_nexus_port) {
                struct asd_sas_port *port = task->dev->port;
                pr_debug("clearing nexus for port:%d\n",
                         port->id);
                res = i->dft->lldd_clear_nexus_port(port);
                if (res == TMF_RESP_FUNC_COMPLETE) {
                    pr_notice("clear nexus port:%d succeeded\n",
                              port->id);
                    sas_eh_finish_cmd(cmd);
                    /* port clear covers every command on this port */
                    sas_scsi_clear_queue_port(work_q,
                                              port);
                    goto Again;
                }
            }
            if (i->dft->lldd_clear_nexus_ha) {
                pr_debug("clear nexus ha\n");
                res = i->dft->lldd_clear_nexus_ha(ha);
                if (res == TMF_RESP_FUNC_COMPLETE) {
                    pr_notice("clear nexus ha succeeded\n");
                    sas_eh_finish_cmd(cmd);
                    goto clear_q;
                }
            }
            /* If we are here -- this means that no amount
             * of effort could recover from errors. Quite
             * possibly the HA just disappeared.
             */
            pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
                   SAS_ADDR(task->dev->sas_addr),
                   cmd->device->lun);

            sas_eh_finish_cmd(cmd);
            goto clear_q;
        }
    }
 out:
    list_splice_tail(&done, work_q);
    list_splice_tail_init(&ha->eh_ata_q, work_q);
    return;

 clear_q:
    /* HA-wide clear: everything still queued is finished here */
    pr_debug("--- Exit %s -- clear_q\n", __func__);
    list_for_each_entry_safe(cmd, n, work_q, eh_entry)
        sas_eh_finish_cmd(cmd);
    goto out;
}
/*
 * sas_eh_handle_resets - run resets queued by sas_queue_reset
 * @shost: host whose ha->eh_dev_q is drained
 *
 * Pops each queued ssp_device, drops ha->lock while invoking the
 * LLDD reset(s), then retakes it to clear the pending state and the
 * eh_active count.  A device reference is held across the unlocked
 * window.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
    struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    struct sas_internal *i = to_sas_internal(shost->transportt);

    /* handle directed resets to sas devices */
    spin_lock_irq(&ha->lock);
    while (!list_empty(&ha->eh_dev_q)) {
        struct domain_device *dev;
        struct ssp_device *ssp;

        ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
        list_del_init(&ssp->eh_list_node);
        dev = container_of(ssp, typeof(*dev), ssp_dev);
        kref_get(&dev->kref);

        /* sas_queue_reset promotes sata resets; this queue is SSP-only */
        WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

        spin_unlock_irq(&ha->lock);

        if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
            i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

        if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
            i->dft->lldd_I_T_nexus_reset(dev);

        sas_put_device(dev);
        spin_lock_irq(&ha->lock);
        clear_bit(SAS_DEV_EH_PENDING, &dev->state);
        ha->eh_active--;
    }
    spin_unlock_irq(&ha->lock);
}
/*
 * sas_scsi_recover_host - libsas strategy handler entry point
 * @shost: host whose EH thread invoked us
 *
 * Splices the midlayer's eh_cmd_q, freezes sas_task completions
 * (SAS_HA_FROZEN) while sas-level recovery runs, then falls back to
 * generic SCSI EH for commands without tasks, runs queued directed
 * resets and the libata strategy handler, and finally flushes the
 * done queue.  Loops while new EH work was scheduled during the run.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
    struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    LIST_HEAD(eh_work_q);
    int tries = 0;
    bool retry;

retry:
    tries++;
    retry = true;
    spin_lock_irq(shost->host_lock);
    list_splice_init(&shost->eh_cmd_q, &eh_work_q);
    spin_unlock_irq(shost->host_lock);

    pr_notice("Enter %s busy: %d failed: %d\n",
              __func__, scsi_host_busy(shost), shost->host_failed);
    /*
     * Deal with commands that still have SAS tasks (i.e. they didn't
     * complete via the normal sas_task completion mechanism),
     * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
     */
    set_bit(SAS_HA_FROZEN, &ha->state);
    sas_eh_handle_sas_errors(shost, &eh_work_q);
    clear_bit(SAS_HA_FROZEN, &ha->state);
    if (list_empty(&eh_work_q))
        goto out;

    /*
     * Now deal with SCSI commands that completed ok but have a an error
     * code (and hopefully sense data) attached. This is roughly what
     * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
     * command we see here has no sas_task and is thus unknown to the HA.
     */
    sas_ata_eh(shost, &eh_work_q);
    if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
        scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
    sas_eh_handle_resets(shost);

    /* now link into libata eh --- if we have any ata devices */
    sas_ata_strategy_handler(shost);

    scsi_eh_flush_done_q(&ha->eh_done_q);

    /* check if any new eh work was scheduled during the last run */
    spin_lock_irq(&ha->lock);
    if (ha->eh_active == 0) {
        shost->host_eh_scheduled = 0;
        retry = false;
    }
    spin_unlock_irq(&ha->lock);

    if (retry)
        goto retry;

    pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
              __func__, scsi_host_busy(shost),
              shost->host_failed, tries);
}
  635. int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
  636. {
  637. struct domain_device *dev = sdev_to_domain_dev(sdev);
  638. if (dev_is_sata(dev))
  639. return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
  640. return -EINVAL;
  641. }
  642. EXPORT_SYMBOL_GPL(sas_ioctl);
/*
 * sas_find_dev_by_rphy - map a sas_rphy back to its domain_device
 * @rphy: remote phy to look up
 *
 * Walks every port's dev_list under phy_port_lock (and each port's
 * dev_list_lock) looking for the device that owns @rphy.  Returns the
 * device or NULL; no reference is taken — the caller must do that.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
    struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
    struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
    struct domain_device *found_dev = NULL;
    int i;
    unsigned long flags;

    spin_lock_irqsave(&ha->phy_port_lock, flags);
    for (i = 0; i < ha->num_phys; i++) {
        struct asd_sas_port *port = ha->sas_port[i];
        struct domain_device *dev;

        spin_lock(&port->dev_list_lock);
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
            if (rphy == dev->rphy) {
                found_dev = dev;
                /* drop the inner lock before jumping out */
                spin_unlock(&port->dev_list_lock);
                goto found;
            }
        }
        spin_unlock(&port->dev_list_lock);
    }
 found:
    spin_unlock_irqrestore(&ha->phy_port_lock, flags);

    return found_dev;
}
  668. int sas_target_alloc(struct scsi_target *starget)
  669. {
  670. struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
  671. struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
  672. if (!found_dev)
  673. return -ENODEV;
  674. kref_get(&found_dev->kref);
  675. starget->hostdata = found_dev;
  676. return 0;
  677. }
  678. EXPORT_SYMBOL_GPL(sas_target_alloc);
/* default queue depth for tagged-queueing-capable SSP devices */
#define SAS_DEF_QD 256

/*
 * sas_device_configure - libsas ->device_configure implementation
 * @scsi_dev: device being configured
 * @lim:      queue limits to fill in (forwarded to libata for SATA)
 *
 * SATA devices are configured by libata; SSP end devices get the port
 * mode page read and a queue depth of SAS_DEF_QD (or 1 without TCQ).
 * Always returns 0.
 */
int sas_device_configure(struct scsi_device *scsi_dev,
                         struct queue_limits *lim)
{
    struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

    /* only end devices get SCSI device nodes */
    BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

    if (dev_is_sata(dev)) {
        ata_sas_device_configure(scsi_dev, lim, dev->sata_dev.ap);
        return 0;
    }

    sas_read_port_mode_page(scsi_dev);

    if (scsi_dev->tagged_supported) {
        scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
    } else {
        pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
                  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
        scsi_change_queue_depth(scsi_dev, 1);
    }

    scsi_dev->allow_restart = 1;

    return 0;
}
EXPORT_SYMBOL_GPL(sas_device_configure);
  701. int sas_change_queue_depth(struct scsi_device *sdev, int depth)
  702. {
  703. struct domain_device *dev = sdev_to_domain_dev(sdev);
  704. if (dev_is_sata(dev))
  705. return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
  706. if (!sdev->tagged_supported)
  707. depth = 1;
  708. return scsi_change_queue_depth(sdev, depth);
  709. }
  710. EXPORT_SYMBOL_GPL(sas_change_queue_depth);
/*
 * sas_bios_param - report a synthetic 255-head/63-sector geometry
 * @scsi_dev: device (unused)
 * @bdev:     block device (unused)
 * @capacity: total sectors; divided in place by heads*sectors
 * @hsc:      out: [heads, sectors, cylinders]
 */
int sas_bios_param(struct scsi_device *scsi_dev,
                   struct block_device *bdev,
                   sector_t capacity, int *hsc)
{
    hsc[0] = 255;
    hsc[1] = 63;
    /* sector_div modifies capacity in place, leaving the cylinder count */
    sector_div(capacity, 255*63);
    hsc[2] = capacity;

    return 0;
}
EXPORT_SYMBOL_GPL(sas_bios_param);
/*
 * Completion callback for internal (slow-path) tasks: cancel the
 * timeout timer, then wake the waiter.
 */
void sas_task_internal_done(struct sas_task *task)
{
    del_timer(&task->slow_task->timer);
    complete(&task->slow_task->completion);
}
/*
 * Timeout handler for internal tasks.  If the task has not already
 * completed (SAS_TASK_STATE_DONE), mark it aborted and complete it on
 * the LLDD's behalf; if it raced to completion first, do nothing —
 * sas_task_internal_done already signalled the waiter.
 */
void sas_task_internal_timedout(struct timer_list *t)
{
    struct sas_task_slow *slow = from_timer(slow, t, timer);
    struct sas_task *task = slow->task;
    bool is_completed = true;
    unsigned long flags;

    spin_lock_irqsave(&task->task_state_lock, flags);
    if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
        is_completed = false;
    }
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    if (!is_completed)
        complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT (20 * HZ)  /* per-attempt completion timeout */
#define TASK_RETRY 3            /* attempts before giving up */

/*
 * sas_execute_internal_abort - issue an internal-abort task to the LLDD
 * @device: target device
 * @type:   single-tag or whole-device abort
 * @tag:    tag to abort (SCSI_NO_TAG for device-wide)
 * @qid:    hardware queue id
 * @data:   opaque cookie passed to lldd_abort_timeout
 *
 * Allocates a slow task per attempt, arms a TASK_TIMEOUT timer and
 * waits for completion, retrying up to TASK_RETRY times.  Returns a
 * TMF response code, -ENOMEM on allocation failure, the LLDD's error
 * on submission failure, or -EIO on timeout.
 */
static int sas_execute_internal_abort(struct domain_device *device,
                                      enum sas_internal_abort type, u16 tag,
                                      unsigned int qid, void *data)
{
    struct sas_ha_struct *ha = device->port->ha;
    struct sas_internal *i = to_sas_internal(ha->shost->transportt);
    struct sas_task *task = NULL;
    int res, retry;

    for (retry = 0; retry < TASK_RETRY; retry++) {
        task = sas_alloc_slow_task(GFP_KERNEL);
        if (!task)
            return -ENOMEM;

        task->dev = device;
        task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
        task->task_done = sas_task_internal_done;
        task->slow_task->timer.function = sas_task_internal_timedout;
        task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
        add_timer(&task->slow_task->timer);

        task->abort_task.tag = tag;
        task->abort_task.type = type;
        task->abort_task.qid = qid;

        res = i->dft->lldd_execute_task(task, GFP_KERNEL);
        if (res) {
            /* submission failed: disarm the timer before bailing out */
            del_timer_sync(&task->slow_task->timer);
            pr_err("Executing internal abort failed %016llx (%d)\n",
                   SAS_ADDR(device->sas_addr), res);
            break;
        }

        wait_for_completion(&task->slow_task->completion);
        res = TMF_RESP_FUNC_FAILED;

        /* Even if the internal abort timed out, return direct. */
        if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
            bool quit = true;

            if (i->dft->lldd_abort_timeout)
                quit = i->dft->lldd_abort_timeout(task, data);
            else
                pr_err("Internal abort: timeout %016llx\n",
                       SAS_ADDR(device->sas_addr));
            res = -EIO;
            if (quit)
                break;
        }

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == SAS_SAM_STAT_GOOD) {
            res = TMF_RESP_FUNC_COMPLETE;
            break;
        }

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == TMF_RESP_FUNC_SUCC) {
            res = TMF_RESP_FUNC_SUCC;
            break;
        }

        pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
               SAS_ADDR(device->sas_addr), task->task_status.resp,
               task->task_status.stat);
        /* unrecognized result: free this attempt's task and retry */
        sas_free_task(task);
        task = NULL;
    }
    /* the loop may only exhaust after freeing the final task */
    BUG_ON(retry == TASK_RETRY && task != NULL);
    sas_free_task(task);
    return res;
}
/*
 * sas_execute_internal_abort_single - abort one tagged command
 * @device: domain device the command was issued to
 * @tag:    tag of the command to abort
 * @qid:    hardware queue id the command was issued on
 * @data:   opaque cookie passed to the LLDD's timeout handler
 */
int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
				      unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
					  tag, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
/*
 * sas_execute_internal_abort_dev - abort all commands for a device
 * @device: domain device whose commands should be aborted
 * @qid:    hardware queue id to issue the abort on
 * @data:   opaque cookie passed to the LLDD's timeout handler
 */
int sas_execute_internal_abort_dev(struct domain_device *device,
				   unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
					  SCSI_NO_TAG, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
/*
 * sas_execute_tmf - issue a task management function to a device
 * @device:       target domain device
 * @parameter:    protocol payload: an ATA FIS for SATA devices,
 *                otherwise a struct sas_ssp_task
 * @para_len:     length of @parameter in bytes
 * @force_phy_id: if >= 0, force the ATA task out of this phy
 *                (SATA only; ignored for SSP)
 * @tmf:          the TMF to execute
 *
 * Allocates a slow task, arms a TASK_TIMEOUT timer and dispatches the
 * task through the LLDD, waiting for completion.  Retries up to
 * TASK_RETRY times when the response is unrecognised.
 *
 * Return: TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_SUCC on success,
 * the residual byte count on data underrun, TMF_RESP_FUNC_FAILED or a
 * negative errno on failure.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			/* SATA: payload is a register FIS. */
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			/* SSP: payload is a struct sas_ssp_task. */
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			/* Never dispatched: disarm timer and give up. */
			del_timer_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				/* Timed out without ever completing. */
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			/* Completed just as the timer fired; response below
			 * is still valid.
			 */
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			pr_warn("TMF task open reject failed %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		/* Unrecognised response (or open reject): retry. */
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);

	sas_free_task(task);
	return res;
}
  920. static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
  921. struct sas_tmf_task *tmf)
  922. {
  923. struct sas_ssp_task ssp_task;
  924. if (!(device->tproto & SAS_PROTOCOL_SSP))
  925. return TMF_RESP_FUNC_ESUPP;
  926. memcpy(ssp_task.LUN, lun, 8);
  927. return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
  928. }
  929. int sas_abort_task_set(struct domain_device *dev, u8 *lun)
  930. {
  931. struct sas_tmf_task tmf_task = {
  932. .tmf = TMF_ABORT_TASK_SET,
  933. };
  934. return sas_execute_ssp_tmf(dev, lun, &tmf_task);
  935. }
  936. EXPORT_SYMBOL_GPL(sas_abort_task_set);
  937. int sas_clear_task_set(struct domain_device *dev, u8 *lun)
  938. {
  939. struct sas_tmf_task tmf_task = {
  940. .tmf = TMF_CLEAR_TASK_SET,
  941. };
  942. return sas_execute_ssp_tmf(dev, lun, &tmf_task);
  943. }
  944. EXPORT_SYMBOL_GPL(sas_clear_task_set);
  945. int sas_lu_reset(struct domain_device *dev, u8 *lun)
  946. {
  947. struct sas_tmf_task tmf_task = {
  948. .tmf = TMF_LU_RESET,
  949. };
  950. return sas_execute_ssp_tmf(dev, lun, &tmf_task);
  951. }
  952. EXPORT_SYMBOL_GPL(sas_lu_reset);
  953. int sas_query_task(struct sas_task *task, u16 tag)
  954. {
  955. struct sas_tmf_task tmf_task = {
  956. .tmf = TMF_QUERY_TASK,
  957. .tag_of_task_to_be_managed = tag,
  958. };
  959. struct scsi_cmnd *cmnd = task->uldd_task;
  960. struct domain_device *dev = task->dev;
  961. struct scsi_lun lun;
  962. int_to_scsilun(cmnd->device->lun, &lun);
  963. return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
  964. }
  965. EXPORT_SYMBOL_GPL(sas_query_task);
  966. int sas_abort_task(struct sas_task *task, u16 tag)
  967. {
  968. struct sas_tmf_task tmf_task = {
  969. .tmf = TMF_ABORT_TASK,
  970. .tag_of_task_to_be_managed = tag,
  971. };
  972. struct scsi_cmnd *cmnd = task->uldd_task;
  973. struct domain_device *dev = task->dev;
  974. struct scsi_lun lun;
  975. int_to_scsilun(cmnd->device->lun, &lun);
  976. return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
  977. }
  978. EXPORT_SYMBOL_GPL(sas_abort_task);
  979. /*
  980. * Tell an upper layer that it needs to initiate an abort for a given task.
  981. * This should only ever be called by an LLDD.
  982. */
  983. void sas_task_abort(struct sas_task *task)
  984. {
  985. struct scsi_cmnd *sc = task->uldd_task;
  986. /* Escape for libsas internal commands */
  987. if (!sc) {
  988. struct sas_task_slow *slow = task->slow_task;
  989. if (!slow)
  990. return;
  991. if (!del_timer(&slow->timer))
  992. return;
  993. slow->timer.function(&slow->timer);
  994. return;
  995. }
  996. if (dev_is_sata(task->dev))
  997. sas_ata_task_abort(task);
  998. else
  999. blk_abort_request(scsi_cmd_to_rq(sc));
  1000. }
  1001. EXPORT_SYMBOL_GPL(sas_task_abort);
  1002. int sas_slave_alloc(struct scsi_device *sdev)
  1003. {
  1004. if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
  1005. return -ENXIO;
  1006. return 0;
  1007. }
  1008. EXPORT_SYMBOL_GPL(sas_slave_alloc);
  1009. void sas_target_destroy(struct scsi_target *starget)
  1010. {
  1011. struct domain_device *found_dev = starget->hostdata;
  1012. if (!found_dev)
  1013. return;
  1014. starget->hostdata = NULL;
  1015. sas_put_device(found_dev);
  1016. }
  1017. EXPORT_SYMBOL_GPL(sas_target_destroy);
  1018. #define SAS_STRING_ADDR_SIZE 16
  1019. int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
  1020. {
  1021. int res;
  1022. const struct firmware *fw;
  1023. res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
  1024. if (res)
  1025. return res;
  1026. if (fw->size < SAS_STRING_ADDR_SIZE) {
  1027. res = -ENODEV;
  1028. goto out;
  1029. }
  1030. res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
  1031. if (res)
  1032. goto out;
  1033. out:
  1034. release_firmware(fw);
  1035. return res;
  1036. }
  1037. EXPORT_SYMBOL_GPL(sas_request_addr);