sata_dwc_460ex.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *   Copyright 2006 Applied Micro Circuits Corporation
 *   COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include <trace/events/libata.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* These two are defined in "libata.h" */
#undef	DRV_NAME
#undef	DRV_VERSION

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.3"

#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */

enum {
	SATA_DWC_MAX_PORTS = 1,

	SATA_DWC_SCR_OFFSET = 0x24,
	SATA_DWC_REG_OFFSET = 0x64,
};

/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transac size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Recvd BIST pattern def register */
	u32 rxbistpd1;		/* Recvd BIST data dword1 */
	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
	u32 txbistpd;		/* Trans BIST pattern def register */
	u32 txbistpd1;		/* Trans BIST data dword1 */
	u32 txbistpd2;		/* Trans BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};

enum {
	SCR_SCONTROL_DET_ENABLE	= 0x00000001,
	SCR_SSTATUS_DET_PRESENT	= 0x00000001,
	SCR_SERROR_DIAG_X	= 0x04000000,
	/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH	= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH	= 0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN	= 0x00000004,
	SATA_DWC_DMACR_TXCHEN	= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN	= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR	= SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT	= 0x00000001,
	SATA_DWC_INTPR_NEWFP	= 0x00000002,
	SATA_DWC_INTPR_PMABRT	= 0x00000004,
	SATA_DWC_INTPR_ERR	= 0x00000008,
	SATA_DWC_INTPR_NEWBIST	= 0x00000010,
	SATA_DWC_INTPR_IPF	= 0x10000000,
	SATA_DWC_INTMR_DMATM	= 0x00000001,
	SATA_DWC_INTMR_NEWFPM	= 0x00000002,
	SATA_DWC_INTMR_PMABRTM	= 0x00000004,
	SATA_DWC_INTMR_ERRM	= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM	= 0x00000010,
	SATA_DWC_LLCR_SCRAMEN	= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN	= 0x00000002,
	SATA_DWC_LLCR_RPDEN	= 0x00000004,
	/* This is all error bits, zeros are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
};

#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
						 << 16)

struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;
	u32			sactive_queued;
	struct phy		*phy;
	phys_addr_t		dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;
#endif
};

/*
 * Allow one extra special slot for commands and DMA management
 * to account for libata internal commands.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dma_chan		*chan;
	struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
	u32			dma_interrupt_count;
};

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)

enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};

static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}

static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	struct device *dev = hsdev->dev;
	dma_cap_mask_t mask;

	dws->dma_dev = dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(dev, "%s: dma channel unavailable\n", __func__);
		return -EAGAIN;
	}

	return 0;
}

static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (!hsdev->dma->irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}

static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
	if (!hsdev->dma)
		return;

	dw_dma_remove(hsdev->dma);
}

#endif

static const char *get_prot_descript(u8 protocol)
{
	switch (protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_PIO:
		return "ATA PIO";
	case ATA_PROT_DMA:
		return "ATA DMA";
	case ATA_PROT_NCQ:
		return "ATA NCQ";
	case ATA_PROT_NCQ_NODATA:
		return "ATA NCQ no data";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	case ATAPI_PROT_PIO:
		return "ATAPI PIO";
	case ATAPI_PROT_DMA:
		return "ATAPI DMA";
	default:
		return "unknown";
	}
}
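
/*
 * dmaengine completion callback, invoked once per finished DMA descriptor.
 * Together with the SATA controller "operation done" interrupt handled in
 * sata_dwc_isr() this makes up the two interrupts each DMA command
 * produces; the command is completed only after both have been counted.
 */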
static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts. Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}
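
/*
 * Prepare a dmaengine slave transfer for the queued command: configure the
 * channel for 32-bit accesses to the controller DMA FIFO and convert the
 * qc's scatter/gather list into a slave_sg descriptor completed by
 * dma_dwc_xfer_done().
 */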
static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct dma_slave_config sconf;
	struct dma_async_tx_descriptor *desc;

	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = hsdev->dmadr;
		sconf.device_fc = false;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = hsdev->dmadr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
		qc->sg, qc->n_elem, &hsdev->dmadr);

	return desc;
}

static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, *val);

	return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}

static void clear_serror(struct ata_port *ap)
{
	u32 val;

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}
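
/*
 * Acknowledge pending controller interrupts by writing the interrupt
 * pending register value back to itself (write-1-to-clear). Note that this
 * clears every pending bit; the bit argument is currently unused.
 */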
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}

static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}

/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now. TODO check for exact error */
	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}

/*
 * Function : sata_dwc_isr
 * arguments : irq, void *dev_instance
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is called via the port ops registered function:
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command. At this point the tag is the
		 * active tag. It is the tag that matches the command about to
		 * be completed.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is a NCQ command. At this point we need to figure out for which
	 * tags we have gotten a completion interrupt. One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ). We need to process each completed command.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	while (tag_mask) {
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (read status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing --we read status as part of processing a completed
	 * command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
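
/*
 * Disable the DMA channel used by the transfer that just completed for
 * the given tag, keeping the TMOD bit set as the controller requires.
 * If neither RX nor TX is recorded as pending, the driver is out of sync
 * and both channels are cleared defensively.
 */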
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync. If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}
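
/*
 * Second half of DMA command completion: once both interrupts of a DMA
 * command have been counted, clear the pending state, complete the qc and
 * poison the active tag.
 */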
static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc);
	}
}
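
/*
 * Complete a queued command: warn if a DMA transfer is still marked as
 * pending, drop the tag from the driver's sactive bookkeeping masks and
 * hand the qc back to libata.
 */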
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdev->sactive_queued = 0;

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}

static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
			SATA_DWC_INTMR_ERRM |
			SATA_DWC_INTMR_NEWFPM |
			SATA_DWC_INTMR_PMABRTM |
			SATA_DWC_INTMR_DMATM);

	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}
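
/*
 * Map the shadow taskfile registers, which the DWC core exposes at a
 * 4-byte stride from the port base; status/command and altstatus/control
 * share addresses as usual for SFF taskfiles.
 */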
static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + 0x00;
	port->data_addr		= base + 0x00;

	port->error_addr	= base + 0x04;
	port->feature_addr	= base + 0x04;

	port->nsect_addr	= base + 0x08;

	port->lbal_addr		= base + 0x0c;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;

	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;

	port->altstatus_addr	= base + 0x20;
	port->ctl_addr		= base + 0x20;
}
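
/*
 * Request the DMA channel for this port: through dma_request_chan() and
 * the "dmas" DT property on current device trees, or through the legacy
 * dw_dma filter path when CONFIG_SATA_DWC_OLD_DMA is set and no "dmas"
 * property is present.
 */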
static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(dev->of_node, "dmas"))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}

/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	err = sata_dwc_dma_get_channel(hsdevp);
	if (err)
		goto CLEANUP_ALLOC;

	err = phy_power_on(hsdev->phy);
	if (err)
		goto CLEANUP_ALLOC;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror(ap);
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}

static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_sync(hsdevp->chan);
	dma_release_channel(hsdevp->chan);
	phy_power_off(hsdev->phy);

	kfree(hsdevp);
	ap->private_data = NULL;
}

/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}

static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_setup_by_tag(qc, tag);
}
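
/*
 * Start the AHB DMA transfer prepared for the given tag: record the
 * transfer direction, enable the matching TX or RX channel in DMACR and
 * submit the dmaengine descriptor. DMA is started only if the command was
 * previously marked as issued.
 */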
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_TXCHEN);
		else
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_start_by_tag(qc, tag);
}
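
/*
 * Issue a queued command. DMA commands get their dmaengine descriptor
 * prepared up front; NCQ commands additionally set their tag in SActive
 * and are issued here directly, while all other commands fall through to
 * the generic ata_bmdma_qc_issue() path.
 */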
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}

	if (ata_is_ncq(qc->tf.protocol)) {
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		trace_ata_exec_command(ap, &qc->tf, tag);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}

	return 0;
}

static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}
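
/*
 * Hard reset the link via the generic SFF helper, then restore the
 * controller state the reset clobbers: the interrupt masks, DMACR and the
 * DMA burst transaction size register.
 */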
static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}

static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}

/*
 * scsi mid-layer and libata interface structures
 */
static const struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure a LLI block is not created that will span 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};

static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sata_dwc_ops,
	},
};
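
/*
 * Probe: map the controller registers, set up the single port, read the
 * ID and version registers, wire up the (optionally legacy) DMA engine and
 * the PHY, then activate the libata host with sata_dwc_isr() as interrupt
 * handler.
 */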
static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = dev->of_node;
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	void __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct resource *res;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dev_dbg(dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

	/* Read the ID and Version Registers */
	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
	dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = dev;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(np, "dmas")) {
		err = sata_dwc_dma_init_old(ofdev, hsdev);
		if (err)
			return err;
	}
#endif

	hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(hsdev->phy))
		return PTR_ERR(hsdev->phy);

	err = phy_init(hsdev->phy);
	if (err)
		goto error_out;

	/*
	 * Now, register with libATA core, this will also initiate the
	 * device discovery process, invoking our port_start() handler &
	 * error_handler() to execute a dummy Softreset EH session
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err)
		dev_err(dev, "failed to activate host");

	return 0;

error_out:
	phy_exit(hsdev->phy);
	return err;
}

static void sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove_new = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);