sas_ata.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for SATA devices on Serial Attached SCSI (SAS) controllers
 *
 * Copyright (C) 2006 IBM Corporation
 *
 * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/async.h>
#include <linux/export.h>

#include <scsi/sas_ata.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
#include "scsi_transport_api.h"
#include <scsi/scsi_eh.h>

static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
{
        /* Cheesy attempt to translate SAS errors into ATA. Hah! */

        /* transport error */
        if (ts->resp == SAS_TASK_UNDELIVERED)
                return AC_ERR_ATA_BUS;

        /* ts->resp == SAS_TASK_COMPLETE */
        /* task delivered, what happened afterwards? */
        switch (ts->stat) {
        case SAS_DEV_NO_RESPONSE:
                return AC_ERR_TIMEOUT;
        case SAS_INTERRUPTED:
        case SAS_PHY_DOWN:
        case SAS_NAK_R_ERR:
                return AC_ERR_ATA_BUS;
        case SAS_DATA_UNDERRUN:
                /*
                 * Some programs that use the taskfile interface
                 * (smartctl in particular) can cause underrun
                 * problems. Ignore these errors, perhaps at our
                 * peril.
                 */
                return 0;
        case SAS_DATA_OVERRUN:
        case SAS_QUEUE_FULL:
        case SAS_DEVICE_UNKNOWN:
        case SAS_OPEN_TO:
        case SAS_OPEN_REJECT:
                pr_warn("%s: Saw error %d. What to do?\n",
                        __func__, ts->stat);
                return AC_ERR_OTHER;
        case SAM_STAT_CHECK_CONDITION:
        case SAS_ABORTED_TASK:
                return AC_ERR_DEV;
        case SAS_PROTO_RESPONSE:
                /* This means the ending_fis has the error
                 * value; return 0 here to collect it
                 */
                return 0;
        default:
                return 0;
        }
}
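
/*
 * Completion callback installed on every SAS ATA task built in
 * sas_ata_qc_issue(). Translates the SAS task status into libata error
 * masks (or copies the ending FIS for the result taskfile) and completes
 * the qc, unless libsas EH or a frozen port has already claimed the task.
 */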
static void sas_ata_task_done(struct sas_task *task)
{
        struct ata_queued_cmd *qc = task->uldd_task;
        struct domain_device *dev = task->dev;
        struct task_status_struct *stat = &task->task_status;
        struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
        struct sas_ha_struct *sas_ha = dev->port->ha;
        enum ata_completion_errors ac;
        unsigned long flags;
        struct ata_link *link;
        struct ata_port *ap;

        spin_lock_irqsave(&dev->done_lock, flags);
        if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
                task = NULL;
        else if (qc && qc->scsicmd)
                ASSIGN_SAS_TASK(qc->scsicmd, NULL);
        spin_unlock_irqrestore(&dev->done_lock, flags);

        /* check if libsas-eh got to the task before us */
        if (unlikely(!task))
                return;

        if (!qc)
                goto qc_already_gone;

        ap = qc->ap;
        link = &ap->link;

        spin_lock_irqsave(ap->lock, flags);
        /* check if we lost the race with libata/sas_ata_post_internal() */
        if (unlikely(ata_port_is_frozen(ap))) {
                spin_unlock_irqrestore(ap->lock, flags);
                if (qc->scsicmd)
                        goto qc_already_gone;
                else {
                        /* if eh is not involved and the port is frozen then the
                         * ata internal abort process has taken responsibility
                         * for this sas_task
                         */
                        return;
                }
        }

        if (stat->stat == SAS_PROTO_RESPONSE ||
            stat->stat == SAS_SAM_STAT_GOOD ||
            (stat->stat == SAS_SAM_STAT_CHECK_CONDITION &&
             dev->sata_dev.class == ATA_DEV_ATAPI)) {
                memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);

                if (!link->sactive) {
                        qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                } else {
                        link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                        if (unlikely(link->eh_info.err_mask))
                                qc->flags |= ATA_QCFLAG_EH;
                }
        } else {
                ac = sas_to_ata_err(stat);
                if (ac) {
                        pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
                        /* We saw a SAS error. Send a vague error. */
                        if (!link->sactive) {
                                qc->err_mask = ac;
                        } else {
                                link->eh_info.err_mask |= AC_ERR_DEV;
                                qc->flags |= ATA_QCFLAG_EH;
                        }

                        dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
                        dev->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
                }
        }

        qc->lldd_task = NULL;
        ata_qc_complete(qc);
        spin_unlock_irqrestore(ap->lock, flags);

qc_already_gone:
        sas_free_task(task);
}
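
/*
 * Convert a libata queued command into a sas_task and hand it to the LLDD
 * via ->lldd_execute_task(). Returns 0 on successful submission and
 * AC_ERR_SYSTEM otherwise; ap->lock is dropped across the LLDD call and
 * re-taken before returning.
 */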
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
        __must_hold(ap->lock)
{
        struct sas_task *task;
        struct scatterlist *sg;
        int ret = AC_ERR_SYSTEM;
        unsigned int si, xfer = 0;
        struct ata_port *ap = qc->ap;
        struct domain_device *dev = ap->private_data;
        struct sas_ha_struct *sas_ha = dev->port->ha;
        struct Scsi_Host *host = sas_ha->shost;
        struct sas_internal *i = to_sas_internal(host->transportt);

        /* TODO: we should try to remove that unlock */
        spin_unlock(ap->lock);

        /* If the device fell off, no sense in issuing commands */
        if (test_bit(SAS_DEV_GONE, &dev->state))
                goto out;

        task = sas_alloc_task(GFP_ATOMIC);
        if (!task)
                goto out;
        task->dev = dev;
        task->task_proto = SAS_PROTOCOL_STP;
        task->task_done = sas_ata_task_done;

        /* For NCQ commands, zero out the tag libata assigned us */
        if (ata_is_ncq(qc->tf.protocol))
                qc->tf.nsect = 0;

        ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
        task->uldd_task = qc;
        if (ata_is_atapi(qc->tf.protocol)) {
                memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
                task->total_xfer_len = qc->nbytes;
                task->num_scatter = qc->n_elem;
                task->data_dir = qc->dma_dir;
        } else if (!ata_is_data(qc->tf.protocol)) {
                task->data_dir = DMA_NONE;
        } else {
                for_each_sg(qc->sg, sg, qc->n_elem, si)
                        xfer += sg_dma_len(sg);

                task->total_xfer_len = xfer;
                task->num_scatter = si;
                task->data_dir = qc->dma_dir;
        }
        task->scatter = qc->sg;
        qc->lldd_task = task;
        task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
        task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);

        if (qc->flags & ATA_QCFLAG_RESULT_TF)
                task->ata_task.return_fis_on_success = 1;

        if (qc->scsicmd)
                ASSIGN_SAS_TASK(qc->scsicmd, task);

        ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
        if (ret) {
                pr_debug("lldd_execute_task returned: %d\n", ret);

                if (qc->scsicmd)
                        ASSIGN_SAS_TASK(qc->scsicmd, NULL);
                sas_free_task(task);
                qc->lldd_task = NULL;
                ret = AC_ERR_SYSTEM;
        }

out:
        spin_lock(ap->lock);
        return ret;
}
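
/*
 * Fill in the result taskfile from the D2H FIS that sas_ata_task_done()
 * saved in dev->sata_dev.fis.
 */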
static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
        struct domain_device *dev = qc->ap->private_data;

        ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
}

static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
        return to_sas_internal(dev->port->ha->shost->transportt);
}

static int sas_get_ata_command_set(struct domain_device *dev)
{
        struct ata_taskfile tf;

        if (dev->dev_type == SAS_SATA_PENDING)
                return ATA_DEV_UNKNOWN;

        ata_tf_from_fis(dev->frame_rcvd, &tf);

        return ata_dev_classify(&tf);
}

int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
        if (phy->attached_tproto & SAS_PROTOCOL_STP)
                dev->tproto = phy->attached_tproto;
        if (phy->attached_sata_dev)
                dev->tproto |= SAS_SATA_DEV;

        if (phy->attached_dev_type == SAS_SATA_PENDING)
                dev->dev_type = SAS_SATA_PENDING;
        else {
                int res;

                dev->dev_type = SAS_SATA_DEV;
                res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
                                              &dev->sata_dev.rps_resp);
                if (res) {
                        pr_debug("report phy sata to %016llx:%02d returned 0x%x\n",
                                 SAS_ADDR(dev->parent->sas_addr),
                                 phy->phy_id, res);
                        return res;
                }
                memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
                       sizeof(struct dev_to_host_fis));
                dev->sata_dev.class = sas_get_ata_command_set(dev);
        }
        return 0;
}

static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
{
        int res;

        /* we weren't pending, so successfully end the reset sequence now */
        if (dev->dev_type != SAS_SATA_PENDING)
                return 1;

        /* hmmm, if this succeeds do we need to repost the domain_device to the
         * lldd so it can pick up new parameters?
         */
        res = sas_get_ata_info(dev, phy);
        if (res)
                return 0; /* retry */
        else
                return 1;
}
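
/*
 * Ask the parent expander what is attached on the phy leading to this
 * device: returns 1 once a SATA end device is present, 0 while the
 * attachment is still reported as SAS_SATA_PENDING, -ENODEV for any other
 * device type, or the error returned by the SMP discovery request.
 */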
int smp_ata_check_ready_type(struct ata_link *link)
{
        struct domain_device *dev = link->ap->private_data;
        struct sas_phy *phy = sas_get_local_phy(dev);
        struct domain_device *ex_dev = dev->parent;
        enum sas_device_type type = SAS_PHY_UNUSED;
        u8 sas_addr[SAS_ADDR_SIZE];
        int res;

        res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type);
        sas_put_local_phy(phy);
        if (res)
                return res;

        switch (type) {
        case SAS_SATA_PENDING:
                return 0;
        case SAS_END_DEVICE:
                return 1;
        default:
                return -ENODEV;
        }
}
EXPORT_SYMBOL_GPL(smp_ata_check_ready_type);

static int smp_ata_check_ready(struct ata_link *link)
{
        int res;
        struct ata_port *ap = link->ap;
        struct domain_device *dev = ap->private_data;
        struct domain_device *ex_dev = dev->parent;
        struct sas_phy *phy = sas_get_local_phy(dev);
        struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];

        res = sas_ex_phy_discover(ex_dev, phy->number);
        sas_put_local_phy(phy);

        /* break the wait early if the expander is unreachable,
         * otherwise keep polling
         */
        if (res == -ECOMM)
                return res;
        if (res != SMP_RESP_FUNC_ACC)
                return 0;

        switch (ex_phy->attached_dev_type) {
        case SAS_SATA_PENDING:
                return 0;
        case SAS_END_DEVICE:
                if (ex_phy->attached_sata_dev)
                        return sas_ata_clear_pending(dev, ex_phy);
                fallthrough;
        default:
                return -ENODEV;
        }
}

static int local_ata_check_ready(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct domain_device *dev = ap->private_data;
        struct sas_internal *i = dev_to_sas_internal(dev);

        if (i->dft->lldd_ata_check_ready)
                return i->dft->lldd_ata_check_ready(dev);
        else {
                /* lldd's that don't implement 'ready' checking get the
                 * old default behavior of not coordinating reset
                 * recovery with libata
                 */
                return 1;
        }
}

static int sas_ata_printk(const char *level, const struct domain_device *ddev,
                          const char *fmt, ...)
{
        struct ata_port *ap = ddev->sata_dev.ap;
        struct device *dev = &ddev->rphy->dev;
        struct va_format vaf;
        va_list args;
        int r;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        r = printk("%s" SAS_FMT "ata%u: %s: %pV",
                   level, ap->print_id, dev_name(dev), &vaf);

        va_end(args);

        return r;
}

static int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
{
        struct sata_device *sata_dev = &dev->sata_dev;
        int (*check_ready)(struct ata_link *link);
        struct ata_port *ap = sata_dev->ap;
        struct ata_link *link = &ap->link;
        struct sas_phy *phy;
        int ret;

        phy = sas_get_local_phy(dev);
        if (scsi_is_sas_phy_local(phy))
                check_ready = local_ata_check_ready;
        else
                check_ready = smp_ata_check_ready;
        sas_put_local_phy(phy);

        ret = ata_wait_after_reset(link, deadline, check_ready);
        if (ret && ret != -EAGAIN)
                sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);

        return ret;
}
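
/*
 * libata ->hardreset handler: request an I_T nexus reset from the LLDD,
 * then wait for the link to become ready again, using either the local-phy
 * or the expander (SMP) ready check as appropriate for this device.
 */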
static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
                              unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct domain_device *dev = ap->private_data;
        struct sas_internal *i = dev_to_sas_internal(dev);
        int ret;

        ret = i->dft->lldd_I_T_nexus_reset(dev);
        if (ret == -ENODEV)
                return ret;

        if (ret != TMF_RESP_FUNC_COMPLETE)
                sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");

        ret = sas_ata_wait_after_reset(dev, deadline);

        *class = dev->sata_dev.class;

        ap->cbl = ATA_CBL_SATA;
        return ret;
}

/*
 * notify the lldd to forget the sas_task for this internal ata command
 * that bypasses scsi-eh
 */
static void sas_ata_internal_abort(struct sas_task *task)
{
        struct sas_internal *si = dev_to_sas_internal(task->dev);
        unsigned long flags;
        int res;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
            task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                pr_debug("%s: Task %p already finished.\n", __func__, task);
                goto out;
        }
        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        res = si->dft->lldd_abort_task(task);

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE ||
            res == TMF_RESP_FUNC_COMPLETE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                goto out;
        }

        /* XXX we are not prepared to deal with ->lldd_abort_task()
         * failures. TODO: lldds need to unconditionally forget about
         * aborted ata tasks, otherwise we (likely) leak the sas task
         * here
         */
        pr_warn("%s: Task %p leaked.\n", __func__, task);

        if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
                task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        return;
out:
        sas_free_task(task);
}

static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
        if (qc->flags & ATA_QCFLAG_EH)
                qc->err_mask |= AC_ERR_OTHER;

        if (qc->err_mask) {
                /*
                 * Find the sas_task and kill it. By this point, libata
                 * has decided to kill the qc and has frozen the port.
                 * In this state sas_ata_task_done() will no longer free
                 * the sas_task, so we need to notify the lldd (via
                 * ->lldd_abort_task) that the task is dead and free it
                 * ourselves.
                 */
                struct sas_task *task = qc->lldd_task;

                qc->lldd_task = NULL;
                if (!task)
                        return;
                task->uldd_task = NULL;
                sas_ata_internal_abort(task);
        }
}

static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
{
        struct domain_device *dev = ap->private_data;
        struct sas_internal *i = dev_to_sas_internal(dev);

        if (i->dft->lldd_ata_set_dmamode)
                i->dft->lldd_ata_set_dmamode(dev);
}
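
/*
 * Mark ATA EH as pending on this device and bump ha->eh_active before
 * handing off to ata_std_sched_eh(); sas_ata_end_eh() reverses the
 * accounting once libata finishes error handling on the port.
 */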
static void sas_ata_sched_eh(struct ata_port *ap)
{
        struct domain_device *dev = ap->private_data;
        struct sas_ha_struct *ha = dev->port->ha;
        unsigned long flags;

        spin_lock_irqsave(&ha->lock, flags);
        if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
                ha->eh_active++;
        ata_std_sched_eh(ap);
        spin_unlock_irqrestore(&ha->lock, flags);
}

void sas_ata_end_eh(struct ata_port *ap)
{
        struct domain_device *dev = ap->private_data;
        struct sas_ha_struct *ha = dev->port->ha;
        unsigned long flags;

        spin_lock_irqsave(&ha->lock, flags);
        if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
                ha->eh_active--;
        spin_unlock_irqrestore(&ha->lock, flags);
}

static int sas_ata_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct domain_device *dev = ap->private_data;
        struct sas_phy *local_phy = sas_get_local_phy(dev);
        int res = 0;

        if (!local_phy->enabled || test_bit(SAS_DEV_GONE, &dev->state))
                res = -ENOENT;
        sas_put_local_phy(local_phy);

        return res;
}

static struct ata_port_operations sas_sata_ops = {
        .prereset               = sas_ata_prereset,
        .hardreset              = sas_ata_hard_reset,
        .error_handler          = ata_std_error_handler,
        .post_internal_cmd      = sas_ata_post_internal,
        .qc_defer               = ata_std_qc_defer,
        .qc_issue               = sas_ata_qc_issue,
        .qc_fill_rtf            = sas_ata_qc_fill_rtf,
        .set_dmamode            = sas_ata_set_dmamode,
        .sched_eh               = sas_ata_sched_eh,
        .end_eh                 = sas_ata_end_eh,
};
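
/*
 * Allocate and initialize a dedicated ata_host/ata_port pair for a newly
 * discovered SATA domain device and register the port with the ATA
 * transport; libsas keeps one ata_port per device rather than per host.
 */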
int sas_ata_init(struct domain_device *found_dev)
{
        struct sas_ha_struct *ha = found_dev->port->ha;
        struct Scsi_Host *shost = ha->shost;
        struct ata_host *ata_host;
        struct ata_port *ap;
        int rc;

        ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
        if (!ata_host) {
                pr_err("ata host alloc failed.\n");
                return -ENOMEM;
        }

        ata_host_init(ata_host, ha->dev, &sas_sata_ops);

        ap = ata_port_alloc(ata_host);
        if (!ap) {
                pr_err("ata_port_alloc failed.\n");
                rc = -ENODEV;
                goto free_host;
        }

        ap->port_no = 0;
        ap->pio_mask = ATA_PIO4;
        ap->mwdma_mask = ATA_MWDMA2;
        ap->udma_mask = ATA_UDMA6;
        ap->flags |= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
                     ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX;
        ap->ops = &sas_sata_ops;
        ap->private_data = found_dev;
        ap->cbl = ATA_CBL_SATA;
        ap->scsi_host = shost;

        rc = ata_tport_add(ata_host->dev, ap);
        if (rc)
                goto free_port;

        found_dev->sata_dev.ata_host = ata_host;
        found_dev->sata_dev.ap = ap;

        return 0;

free_port:
        ata_port_free(ap);
free_host:
        ata_host_put(ata_host);
        return rc;
}
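
/*
 * Abort an in-flight ATA sas_task: SCSI-initiated commands are bounced to
 * the block layer's timeout handling, while libata-internal commands are
 * completed directly with a faked timeout error.
 */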
void sas_ata_task_abort(struct sas_task *task)
{
        struct ata_queued_cmd *qc = task->uldd_task;
        struct completion *waiting;

        /* Bounce SCSI-initiated commands to the SCSI EH */
        if (qc->scsicmd) {
                blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
                return;
        }

        /* Internal command, fake a timeout and complete. */
        qc->flags &= ~ATA_QCFLAG_ACTIVE;
        qc->flags |= ATA_QCFLAG_EH;
        qc->err_mask |= AC_ERR_TIMEOUT;
        waiting = qc->private_data;
        complete(waiting);
}

void sas_probe_sata(struct asd_sas_port *port)
{
        struct domain_device *dev, *n;

        mutex_lock(&port->ha->disco_mutex);
        list_for_each_entry(dev, &port->disco_list, disco_list_node) {
                if (!dev_is_sata(dev))
                        continue;

                ata_port_probe(dev->sata_dev.ap);
        }
        mutex_unlock(&port->ha->disco_mutex);

        list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
                if (!dev_is_sata(dev))
                        continue;

                sas_ata_wait_eh(dev);

                /* if libata could not bring the link up, don't surface
                 * the device
                 */
                if (!ata_dev_enabled(sas_to_ata_dev(dev)))
                        sas_fail_probe(dev, __func__, -ENODEV);
        }
}

int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
                    struct domain_device *child, int phy_id)
{
        struct sas_rphy *rphy;
        int ret;

        if (child->linkrate > parent->min_linkrate) {
                struct sas_phy *cphy = child->phy;
                enum sas_linkrate min_prate = cphy->minimum_linkrate,
                        parent_min_lrate = parent->min_linkrate,
                        min_linkrate = (min_prate > parent_min_lrate) ?
                                        parent_min_lrate : 0;
                struct sas_phy_linkrates rates = {
                        .maximum_linkrate = parent->min_linkrate,
                        .minimum_linkrate = min_linkrate,
                };

                pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
                          SAS_ADDR(child->sas_addr), phy_id);
                ret = sas_smp_phy_control(parent, phy_id,
                                          PHY_FUNC_LINK_RESET, &rates);
                if (ret) {
                        pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
                               SAS_ADDR(child->sas_addr), phy_id, ret);
                        return ret;
                }
                pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
                          SAS_ADDR(child->sas_addr), phy_id);
                child->linkrate = child->min_linkrate;
        }

        ret = sas_get_ata_info(child, phy);
        if (ret)
                return ret;

        sas_init_dev(child);

        ret = sas_ata_init(child);
        if (ret)
                return ret;

        rphy = sas_end_device_alloc(phy->port);
        if (!rphy)
                return -ENOMEM;

        rphy->identify.phy_identifier = phy_id;
        child->rphy = rphy;
        get_device(&rphy->dev);

        list_add_tail(&child->disco_list_node, &parent->port->disco_list);

        ret = sas_discover_sata(child);
        if (ret) {
                pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
                          SAS_ADDR(child->sas_addr),
                          SAS_ADDR(parent->sas_addr), phy_id, ret);
                sas_rphy_free(child->rphy);
                list_del(&child->disco_list_node);
                return ret;
        }

        return 0;
}

static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
        struct domain_device *dev, *n;

        list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
                if (!dev_is_sata(dev))
                        continue;

                sas_ata_wait_eh(dev);

                /* if libata failed to power manage the device, tear it down */
                if (ata_dev_disabled(sas_to_ata_dev(dev)))
                        sas_fail_probe(dev, func, -ENODEV);
        }
}

void sas_suspend_sata(struct asd_sas_port *port)
{
        struct domain_device *dev;

        mutex_lock(&port->ha->disco_mutex);
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
                struct sata_device *sata;

                if (!dev_is_sata(dev))
                        continue;

                sata = &dev->sata_dev;
                if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
                        continue;

                ata_sas_port_suspend(sata->ap);
        }
        mutex_unlock(&port->ha->disco_mutex);

        sas_ata_flush_pm_eh(port, __func__);
}

void sas_resume_sata(struct asd_sas_port *port)
{
        struct domain_device *dev;

        mutex_lock(&port->ha->disco_mutex);
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
                struct sata_device *sata;

                if (!dev_is_sata(dev))
                        continue;

                sata = &dev->sata_dev;
                if (sata->ap->pm_mesg.event == PM_EVENT_ON)
                        continue;

                ata_sas_port_resume(sata->ap);
        }
        mutex_unlock(&port->ha->disco_mutex);

        sas_ata_flush_pm_eh(port, __func__);
}

/**
 * sas_discover_sata - discover an STP/SATA domain device
 * @dev: pointer to struct domain_device of interest
 *
 * Devices directly attached to a HA port have no parents. All other
 * devices do, and should have their "parent" pointer set appropriately
 * before calling this function.
 */
int sas_discover_sata(struct domain_device *dev)
{
        if (dev->dev_type == SAS_SATA_PM)
                return -ENODEV;

        dev->sata_dev.class = sas_get_ata_command_set(dev);
        sas_fill_in_rphy(dev, dev->rphy);

        return sas_notify_lldd_dev_found(dev);
}

static void async_sas_ata_eh(void *data, async_cookie_t cookie)
{
        struct domain_device *dev = data;
        struct ata_port *ap = dev->sata_dev.ap;
        struct sas_ha_struct *ha = dev->port->ha;

        sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
        ata_scsi_port_error_handler(ha->shost, ap);
        sas_put_device(dev);
}

void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
        struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
        ASYNC_DOMAIN_EXCLUSIVE(async);
        int i;

        /* it's ok to defer revalidation events during ata eh, these
         * disks are in one of three states:
         * 1/ present for initial domain discovery, and these
         *    resets will cause bcn flutters
         * 2/ hot removed, we'll discover that after eh fails
         * 3/ hot added after initial discovery, lost the race, and need
         *    to catch the next train.
         */
        sas_disable_revalidation(sas_ha);

        spin_lock_irq(&sas_ha->phy_port_lock);
        for (i = 0; i < sas_ha->num_phys; i++) {
                struct asd_sas_port *port = sas_ha->sas_port[i];
                struct domain_device *dev;

                spin_lock(&port->dev_list_lock);
                list_for_each_entry(dev, &port->dev_list, dev_list_node) {
                        if (!dev_is_sata(dev))
                                continue;

                        /* hold a reference over eh since we may be
                         * racing with final remove once all commands
                         * are completed
                         */
                        kref_get(&dev->kref);
                        async_schedule_domain(async_sas_ata_eh, dev, &async);
                }
                spin_unlock(&port->dev_list_lock);
        }
        spin_unlock_irq(&sas_ha->phy_port_lock);

        async_synchronize_full_domain(&async);

        sas_enable_revalidation(sas_ha);
}
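
/*
 * Peel SATA commands off the SCSI EH work queue, grouping them by domain
 * device, and hand each group to libata's command error handler. Commands
 * that already have a sas_task attached are left for the SAS error handler.
 */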
void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
        struct scsi_cmnd *cmd, *n;
        struct domain_device *eh_dev;

        do {
                LIST_HEAD(sata_q);
                eh_dev = NULL;

                list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
                        struct domain_device *ddev = cmd_to_domain_dev(cmd);

                        if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
                                continue;
                        if (eh_dev && eh_dev != ddev)
                                continue;
                        eh_dev = ddev;
                        list_move(&cmd->eh_entry, &sata_q);
                }

                if (!list_empty(&sata_q)) {
                        struct ata_port *ap = eh_dev->sata_dev.ap;

                        sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
                        ata_scsi_cmd_error_handler(shost, ap, &sata_q);
                        /*
                         * ata's error handler may leave the cmd on the list
                         * so make sure they don't remain on a stack list
                         * about to go out of scope.
                         *
                         * This looks strange, since the commands are
                         * now part of no list, but the next error
                         * action will be ata_port_error_handler()
                         * which takes no list and sweeps them up
                         * anyway from the ata tag array.
                         */
                        while (!list_empty(&sata_q))
                                list_del_init(sata_q.next);
                }
        } while (eh_dev);
}

void sas_ata_schedule_reset(struct domain_device *dev)
{
        struct ata_eh_info *ehi;
        struct ata_port *ap;
        unsigned long flags;

        if (!dev_is_sata(dev))
                return;

        ap = dev->sata_dev.ap;
        ehi = &ap->link.eh_info;

        spin_lock_irqsave(ap->lock, flags);
        ehi->err_mask |= AC_ERR_TIMEOUT;
        ehi->action |= ATA_EH_RESET;
        ata_port_schedule_eh(ap);
        spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);

void sas_ata_wait_eh(struct domain_device *dev)
{
        struct ata_port *ap;

        if (!dev_is_sata(dev))
                return;

        ap = dev->sata_dev.ap;
        ata_port_wait_eh(ap);
}
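
/*
 * Record a device error in the saved FIS, flag the link's eh_info with
 * AC_ERR_DEV (and optionally ATA_EH_RESET), and abort all commands
 * outstanding on the link. Exported so LLDDs can force error recovery on
 * a SATA device's link.
 */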
void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
{
        struct ata_port *ap = device->sata_dev.ap;
        struct ata_link *link = &ap->link;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
        device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */

        link->eh_info.err_mask |= AC_ERR_DEV;
        if (force_reset)
                link->eh_info.action |= ATA_EH_RESET;
        ata_link_abort(link);
        spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);

int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
{
        struct sas_tmf_task tmf_task = {};

        return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis),
                               force_phy_id, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);

static ssize_t sas_ncq_prio_supported_show(struct device *device,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct scsi_device *sdev = to_scsi_device(device);
        struct domain_device *ddev = sdev_to_domain_dev(sdev);
        bool supported;
        int rc;

        rc = ata_ncq_prio_supported(ddev->sata_dev.ap, sdev, &supported);
        if (rc)
                return rc;

        return sysfs_emit(buf, "%d\n", supported);
}

static struct device_attribute dev_attr_sas_ncq_prio_supported =
        __ATTR(ncq_prio_supported, S_IRUGO, sas_ncq_prio_supported_show, NULL);

static ssize_t sas_ncq_prio_enable_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct scsi_device *sdev = to_scsi_device(device);
        struct domain_device *ddev = sdev_to_domain_dev(sdev);
        bool enabled;
        int rc;

        rc = ata_ncq_prio_enabled(ddev->sata_dev.ap, sdev, &enabled);
        if (rc)
                return rc;

        return sysfs_emit(buf, "%d\n", enabled);
}

static ssize_t sas_ncq_prio_enable_store(struct device *device,
                                         struct device_attribute *attr,
                                         const char *buf, size_t len)
{
        struct scsi_device *sdev = to_scsi_device(device);
        struct domain_device *ddev = sdev_to_domain_dev(sdev);
        bool enable;
        int rc;

        rc = kstrtobool(buf, &enable);
        if (rc)
                return rc;

        rc = ata_ncq_prio_enable(ddev->sata_dev.ap, sdev, enable);
        if (rc)
                return rc;

        return len;
}

static struct device_attribute dev_attr_sas_ncq_prio_enable =
        __ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
               sas_ncq_prio_enable_show, sas_ncq_prio_enable_store);

static struct attribute *sas_ata_sdev_attrs[] = {
        &dev_attr_sas_ncq_prio_supported.attr,
        &dev_attr_sas_ncq_prio_enable.attr,
        NULL
};

static umode_t sas_ata_attr_is_visible(struct kobject *kobj,
                                       struct attribute *attr, int i)
{
        struct device *dev = kobj_to_dev(kobj);
        struct scsi_device *sdev = to_scsi_device(dev);
        struct domain_device *ddev = sdev_to_domain_dev(sdev);

        if (!dev_is_sata(ddev))
                return 0;

        return attr->mode;
}

const struct attribute_group sas_ata_sdev_attr_group = {
        .attrs = sas_ata_sdev_attrs,
        .is_visible = sas_ata_attr_is_visible,
};
EXPORT_SYMBOL_GPL(sas_ata_sdev_attr_group);