aic94xx_tmf.c

/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */
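
/*
 * Post an internally generated SCB to the sequencer: hook up the
 * completion tasklet, arm the SCB timeout timer and post the ascb.
 * The timer is deleted again if posting fails.
 */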
static int asd_enqueue_internal(struct asd_ascb *ascb,
				void (*tasklet_complete)(struct asd_ascb *,
							 struct done_list_struct *),
				void (*timed_out)(struct timer_list *t))
{
	int res;

	ascb->tasklet_complete = tasklet_complete;
	ascb->uldd_timer = 1;

	ascb->timer.function = timed_out;
	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
	add_timer(&ascb->timer);

	res = asd_post_ascb_list(ascb->ha, ascb, 1);
	if (unlikely(res))
		del_timer(&ascb->timer);
	return res;
}

/* ---------- CLEAR NEXUS ---------- */
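
/*
 * Completion status handed from the interrupt-time tasklet back to the
 * sleeping caller: the done-list opcode, the TMF response state and,
 * when one was returned, the tag of the task the TMF refers to.
 */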
struct tasklet_completion_status {
	int	dl_opcode;
	int	tmf_state;
	u8	tag_valid:1;
	__be16	tag;
};

#define DECLARE_TCS(tcs) \
	struct tasklet_completion_status tcs = { \
		.dl_opcode = 0, \
		.tmf_state = 0, \
		.tag_valid = 0, \
		.tag = 0, \
	}
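
/*
 * Tasklet completion for a CLEAR NEXUS SCB: record the done-list opcode
 * and wake the waiter.  If the timer could not be deleted, the timeout
 * handler has already run and will complete the caller, so do nothing.
 */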
static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
	tcs->dl_opcode = dl->opcode;
	complete(ascb->completion);
	asd_ascb_free(ascb);
}
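
/*
 * Timer callback for a CLEAR NEXUS SCB that never completed: report
 * TMF_RESP_FUNC_FAILED through the completion status and wake the waiter.
 */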
static void asd_clear_nexus_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
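
/*
 * CLEAR_NEXUS_PRE/POST expand to the boilerplate shared by all the
 * asd_clear_nexus_*() variants below: allocate and fill an ascb, post it,
 * wait for completion and translate TC_NO_ERROR into
 * TMF_RESP_FUNC_COMPLETE.
 */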
#define CLEAR_NEXUS_PRE \
	struct asd_ascb *ascb; \
	struct scb *scb; \
	int res; \
	DECLARE_COMPLETION_ONSTACK(completion); \
	DECLARE_TCS(tcs); \
	\
	ASD_DPRINTK("%s: PRE\n", __func__); \
	res = 1; \
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb) \
		return -ENOMEM; \
	\
	ascb->completion = &completion; \
	ascb->uldd_task = &tcs; \
	scb = ascb->scb; \
	scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST \
	ASD_DPRINTK("%s: POST\n", __func__); \
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout); \
	if (res) \
		goto out_err; \
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
	wait_for_completion(&completion); \
	res = tcs.dl_opcode; \
	if (res == TC_NO_ERROR) \
		res = TMF_RESP_FUNC_COMPLETE; \
	return res; \
out_err: \
	asd_ascb_free(ascb); \
	return res

int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}

enum clear_nexus_phase {
	NEXUS_PHASE_PRE,
	NEXUS_PHASE_POST,
	NEXUS_PHASE_RESUME,
};
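
/*
 * Clear the I_T nexus to @dev in phases: PRE suspends transmission and
 * flushes the execution queue, POST flushes the send queue once the
 * reset has been delivered, and RESUME lets traffic flow again.
 */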
static int asd_clear_nexus_I_T(struct domain_device *dev,
			       enum clear_nexus_phase phase)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	switch (phase) {
	case NEXUS_PHASE_PRE:
		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
		break;
	case NEXUS_PHASE_POST:
		scb->clear_nexus.flags = SEND_Q | NOTINQ;
		break;
	case NEXUS_PHASE_RESUME:
		scb->clear_nexus.flags = RESUME_TX;
	}
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
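
/*
 * I_T nexus reset: suspend the nexus, reset the phy (link reset for
 * SATA/STP, hard reset for SSP), clear outstanding commands while still
 * suspended, then retry the RESUME phase a few times before giving up.
 */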
int asd_I_T_nexus_reset(struct domain_device *dev)
{
	int res, tmp_res, i;
	struct sas_phy *phy = sas_get_local_phy(dev);
	/* Standard mandates link reset for ATA (type 0) and
	 * hard reset for SSP (type 1) */
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
		/* wait for the maximum settle time */
		msleep(500);
		/* clear all outstanding commands (keep nexus suspended) */
		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
	}

	for (i = 0; i < 3; i++) {
		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
		if (tmp_res == TC_RESUME)
			goto out;
		msleep(500);
	}

	/* This is a bit of a problem: the sequencer is still suspended
	 * and is refusing to resume.  Hope it will resume on a bigger hammer
	 * or the disk is lost */
	dev_printk(KERN_ERR, &phy->dev,
		   "Failed to resume nexus after reset 0x%x\n", tmp_res);

	res = TMF_RESP_FUNC_FAILED;
 out:
	sas_put_local_phy(phy);
	return res;
}
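
/*
 * Clear the I_T_L nexus: abort everything queued for the given LUN on
 * the device, in both the send and the execution queues.
 */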
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
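
/*
 * Clear a single pending task, either by its SSP tag when a valid tag is
 * known (asd_clear_nexus_tag) or by its transaction context index
 * (asd_clear_nexus_index).
 */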
static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}

/* ---------- TMFs ---------- */
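
/*
 * Timer callback for a TMF SCB that never completed: mark the TMF as
 * failed and wake the waiter.
 */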
static void asd_tmf_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("tmf timed out\n");
	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
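
/*
 * Dig the SSP Response IU for a TMF out of the empty data buffer (EDB)
 * the sequencer filled in: find the owning escb from the status block,
 * pick the right EDB, and return either the response data (if present)
 * or the status field of the Response IU.
 */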
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8 len_lsb;
		u8 flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

	int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu *ru;
	int res = TMF_RESP_FUNC_FAILED;

	ASD_DPRINTK("tmf resp tasklet\n");

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}

	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	ascb->tag = *(__be16 *)(edb->vaddr+4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == 1)	  /* Response data present */
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;

	asd_invalidate_edb(escb, edb_id);
	return res;
}
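
/*
 * Tasklet completion for a TMF SCB: record the done-list opcode and, for
 * an SSP RESPONSE frame, extract the TMF response state and the tag of
 * the addressed task, then wake the waiter.  If the timeout handler has
 * already fired, it owns the completion, so do nothing here.
 */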
static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs;

	if (!del_timer(&ascb->timer))
		return;

	tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf tasklet complete\n");

	tcs->dl_opcode = dl->opcode;

	if (dl->opcode == TC_SSP_RESP) {
		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
		tcs->tag_valid = ascb->tag_valid;
		tcs->tag = ascb->tag;
	}

	complete(ascb->completion);
	asd_ascb_free(ascb);
}
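
/*
 * The ABORT TASK TMF completed but the task itself is not done yet:
 * clear it from the sequencer (by tag if one is known, else by its
 * transaction context index) and re-check the task state afterwards.
 */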
static int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	int leftover;
	struct asd_ascb *tascb = task->lldd_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	tascb->completion = &completion;

	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	leftover = wait_for_completion_timeout(&completion,
					       AIC94XX_SCB_TIMEOUT);
	tascb->completion = NULL;
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (leftover < 1)
		res = TMF_RESP_FUNC_FAILED;
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}

/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller of ABORT TASK checks first the
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
 * task was completed successfully prior to it being aborted.  The
 * caller of ABORT TASK has responsibility to call task->task_done()
 * xor free the task, depending on their framework.  The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * Else the SAS_TASK_STATE_DONE bit is not set,
 *	If the return code is TMF_RESP_FUNC_COMPLETE, then
 *		the task was aborted successfully.  The caller of
 *		ABORT TASK has responsibility to call task->task_done()
 *		to finish the task, xor free the task depending on their
 *		framework.
 *	else
 *		the ABORT TASK returned some kind of error.  The task
 *		was _not_ cancelled.  Nothing can be assumed.
 *		The caller of ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = tascb->ha;
	int res = 1;
	unsigned long flags;
	struct asd_ascb *ascb = NULL;
	struct scb *scb;
	int leftover;
	DECLARE_TCS(tcs);
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_COMPLETION_ONSTACK(tascb_completion);

	tascb->completion = &tascb_completion;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->uldd_task = &tcs;
	ascb->completion = &completion;
	scb = ascb->scb;
	scb->header.opcode = SCB_ABORT_TASK;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
		break;
	case SAS_PROTOCOL_SSP:
		scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
		break;
	case SAS_PROTOCOL_SMP:
		break;
	default:
		break;
	}

	if (task->task_proto == SAS_PROTOCOL_SSP) {
		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
		       task->dev->port->ha->hashed_sas_addr,
		       HASHED_SAS_ADDR_SIZE);
		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
	}

	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->abort_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)task->dev->lldd_dev);
	scb->abort_task.retry_count = 1;
	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_free;
	wait_for_completion(&completion);
	ASD_DPRINTK("tmf came back\n");

	tascb->tag = tcs.tag;
	tascb->tag_valid = tcs.tag_valid;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (tcs.dl_opcode == TC_SSP_RESP) {
		/* The task to be aborted has been sent to the device.
		 * We got a Response IU for the ABORT TASK TMF. */
		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
			res = asd_clear_nexus(task);
		else
			res = tcs.tmf_state;
	} else if (tcs.dl_opcode == TC_NO_ERROR &&
		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
		/* timeout */
		res = TMF_RESP_FUNC_FAILED;
	} else {
		/* In the following we assume that the managing layer
		 * will _never_ make a mistake, when issuing ABORT
		 * TASK.
		 */
		switch (tcs.dl_opcode) {
		default:
			res = asd_clear_nexus(task);
			/* fallthrough */
		case TC_NO_ERROR:
			break;
			/* The task hasn't been sent to the device xor
			 * we never got a (sane) Response IU for the
			 * ABORT TASK TMF.
			 */
		case TF_NAK_RECV:
			res = TMF_RESP_INVALID_FRAME;
			break;
		case TF_TMF_TASK_DONE:	/* done but not reported yet */
			res = TMF_RESP_FUNC_FAILED;
			leftover =
				wait_for_completion_timeout(&tascb_completion,
							    AIC94XX_SCB_TIMEOUT);
			spin_lock_irqsave(&task->task_state_lock, flags);
			if (leftover < 1)
				res = TMF_RESP_FUNC_FAILED;
			if (task->task_state_flags & SAS_TASK_STATE_DONE)
				res = TMF_RESP_FUNC_COMPLETE;
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			break;
		case TF_TMF_NO_TAG:
		case TF_TMF_TAG_FREE: /* the tag is in the free list */
		case TF_TMF_NO_CONN_HANDLE: /* no such device */
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
			res = TMF_RESP_FUNC_ESUPP;
			break;
		}
	}
 out_done:
	tascb->completion = NULL;
	if (res == TMF_RESP_FUNC_COMPLETE) {
		task->lldd_task = NULL;
		mb();
		asd_ascb_free(tascb);
	}
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;

 out_free:
	asd_ascb_free(ascb);
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;
}

/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);

	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->completion = &completion;
	ascb->uldd_task = &tcs;
	scb = ascb->scb;

	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate  = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
					       dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&completion);

	switch (tcs.dl_opcode) {
	case TC_NO_ERROR:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE: /* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		/* Allow TMF response codes to propagate upwards */
		res = tcs.dl_opcode;
		break;
	}
	return res;
out_err:
	asd_ascb_free(ascb);
	return res;
}
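
/*
 * The four wrappers below send the corresponding SSP TMF to the I_T_L
 * nexus and, on success, clear any commands still queued in the
 * sequencer for that LUN.
 */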
int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to aborted state,
 * and then calls query task and then abort task.
 */
int asd_query_task(struct sas_task *task)
{
	struct asd_ascb *ascb = task->lldd_task;
	int index;

	if (ascb) {
		index = ascb->tc_index;
		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
					    TMF_QUERY_TASK, index);
	}
	return TMF_RESP_FUNC_COMPLETE;
}