sata_mv.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) Excito Elektronik i Skåne AB, 2010.
  4. * Author: Tor Krill <tor@excito.com>
  5. *
  6. * Copyright (C) 2015 Stefan Roese <sr@denx.de>
  7. */
  8. /*
  9. * This driver supports the SATA controller of some Marvell SoC's.
  10. * Here a (most likely incomplete) list of the supported SoC's:
  11. * - Kirkwood
  12. * - Armada 370
  13. * - Armada XP
  14. *
  15. * This driver implementation is an alternative to the already available
  16. * driver via the "ide" commands interface (drivers/block/mvsata_ide.c).
  17. * But this driver only supports PIO mode and as this new driver also
  18. * supports transfer via DMA, it's much faster.
  19. *
  20. * Please note, that the newer SoC's (e.g. Armada 38x) are not supported
  21. * by this driver. As they have an AHCI compatible SATA controller
  22. * integrated.
  23. */
  24. /*
  25. * TODO:
  26. * Better error recovery
  27. * No support for using PRDs (Thus max 64KB transfers)
  28. * No NCQ support
  29. * No port multiplier support
  30. */
  31. #include <common.h>
  32. #include <fis.h>
  33. #include <libata.h>
  34. #include <malloc.h>
  35. #include <sata.h>
  36. #include <linux/errno.h>
  37. #include <asm/io.h>
  38. #include <linux/mbus.h>
  39. #if defined(CONFIG_KIRKWOOD)
  40. #include <asm/arch/kirkwood.h>
  41. #define SATAHC_BASE KW_SATA_BASE
  42. #else
  43. #include <asm/arch/soc.h>
  44. #define SATAHC_BASE MVEBU_AXP_SATA_BASE
  45. #endif
  46. #define SATA0_BASE (SATAHC_BASE + 0x2000)
  47. #define SATA1_BASE (SATAHC_BASE + 0x4000)
  48. /* EDMA registers */
  49. #define EDMA_CFG 0x000
  50. #define EDMA_CFG_NCQ (1 << 5)
  51. #define EDMA_CFG_EQUE (1 << 9)
  52. #define EDMA_TIMER 0x004
  53. #define EDMA_IECR 0x008
  54. #define EDMA_IEMR 0x00c
  55. #define EDMA_RQBA_HI 0x010
  56. #define EDMA_RQIPR 0x014
  57. #define EDMA_RQIPR_IPMASK (0x1f << 5)
  58. #define EDMA_RQIPR_IPSHIFT 5
  59. #define EDMA_RQOPR 0x018
  60. #define EDMA_RQOPR_OPMASK (0x1f << 5)
  61. #define EDMA_RQOPR_OPSHIFT 5
  62. #define EDMA_RSBA_HI 0x01c
  63. #define EDMA_RSIPR 0x020
  64. #define EDMA_RSIPR_IPMASK (0x1f << 3)
  65. #define EDMA_RSIPR_IPSHIFT 3
  66. #define EDMA_RSOPR 0x024
  67. #define EDMA_RSOPR_OPMASK (0x1f << 3)
  68. #define EDMA_RSOPR_OPSHIFT 3
  69. #define EDMA_CMD 0x028
  70. #define EDMA_CMD_ENEDMA (0x01 << 0)
  71. #define EDMA_CMD_DISEDMA (0x01 << 1)
  72. #define EDMA_CMD_ATARST (0x01 << 2)
  73. #define EDMA_CMD_FREEZE (0x01 << 4)
  74. #define EDMA_TEST_CTL 0x02c
  75. #define EDMA_STATUS 0x030
  76. #define EDMA_IORTO 0x034
  77. #define EDMA_CDTR 0x040
  78. #define EDMA_HLTCND 0x060
  79. #define EDMA_NTSR 0x094
  80. /* Basic DMA registers */
  81. #define BDMA_CMD 0x224
  82. #define BDMA_STATUS 0x228
  83. #define BDMA_DTLB 0x22c
  84. #define BDMA_DTHB 0x230
  85. #define BDMA_DRL 0x234
  86. #define BDMA_DRH 0x238
  87. /* SATA Interface registers */
  88. #define SIR_ICFG 0x050
  89. #define SIR_CFG_GEN2EN (0x1 << 7)
  90. #define SIR_PLL_CFG 0x054
  91. #define SIR_SSTATUS 0x300
  92. #define SSTATUS_DET_MASK (0x0f << 0)
  93. #define SIR_SERROR 0x304
  94. #define SIR_SCONTROL 0x308
  95. #define SIR_SCONTROL_DETEN (0x01 << 0)
  96. #define SIR_LTMODE 0x30c
  97. #define SIR_LTMODE_NELBE (0x01 << 7)
  98. #define SIR_PHYMODE3 0x310
  99. #define SIR_PHYMODE4 0x314
  100. #define SIR_PHYMODE1 0x32c
  101. #define SIR_PHYMODE2 0x330
  102. #define SIR_BIST_CTRL 0x334
  103. #define SIR_BIST_DW1 0x338
  104. #define SIR_BIST_DW2 0x33c
  105. #define SIR_SERR_IRQ_MASK 0x340
  106. #define SIR_SATA_IFCTRL 0x344
  107. #define SIR_SATA_TESTCTRL 0x348
  108. #define SIR_SATA_IFSTATUS 0x34c
  109. #define SIR_VEND_UNIQ 0x35c
  110. #define SIR_FIS_CFG 0x360
  111. #define SIR_FIS_IRQ_CAUSE 0x364
  112. #define SIR_FIS_IRQ_MASK 0x368
  113. #define SIR_FIS_DWORD0 0x370
  114. #define SIR_FIS_DWORD1 0x374
  115. #define SIR_FIS_DWORD2 0x378
  116. #define SIR_FIS_DWORD3 0x37c
  117. #define SIR_FIS_DWORD4 0x380
  118. #define SIR_FIS_DWORD5 0x384
  119. #define SIR_FIS_DWORD6 0x388
  120. #define SIR_PHYM9_GEN2 0x398
  121. #define SIR_PHYM9_GEN1 0x39c
  122. #define SIR_PHY_CFG 0x3a0
  123. #define SIR_PHYCTL 0x3a4
  124. #define SIR_PHYM10 0x3a8
  125. #define SIR_PHYM12 0x3b0
  126. /* Shadow registers */
  127. #define PIO_DATA 0x100
  128. #define PIO_ERR_FEATURES 0x104
  129. #define PIO_SECTOR_COUNT 0x108
  130. #define PIO_LBA_LOW 0x10c
  131. #define PIO_LBA_MID 0x110
  132. #define PIO_LBA_HI 0x114
  133. #define PIO_DEVICE 0x118
  134. #define PIO_CMD_STATUS 0x11c
  135. #define PIO_STATUS_ERR (0x01 << 0)
  136. #define PIO_STATUS_DRQ (0x01 << 3)
  137. #define PIO_STATUS_DF (0x01 << 5)
  138. #define PIO_STATUS_DRDY (0x01 << 6)
  139. #define PIO_STATUS_BSY (0x01 << 7)
  140. #define PIO_CTRL_ALTSTAT 0x120
  141. /* SATAHC arbiter registers */
  142. #define SATAHC_CFG 0x000
  143. #define SATAHC_RQOP 0x004
  144. #define SATAHC_RQIP 0x008
  145. #define SATAHC_ICT 0x00c
  146. #define SATAHC_ITT 0x010
  147. #define SATAHC_ICR 0x014
  148. #define SATAHC_ICR_PORT0 (0x01 << 0)
  149. #define SATAHC_ICR_PORT1 (0x01 << 1)
  150. #define SATAHC_MIC 0x020
  151. #define SATAHC_MIM 0x024
  152. #define SATAHC_LED_CFG 0x02c
  153. #define REQUEST_QUEUE_SIZE 32
  154. #define RESPONSE_QUEUE_SIZE REQUEST_QUEUE_SIZE
  155. struct crqb {
  156. u32 dtb_low; /* DW0 */
  157. u32 dtb_high; /* DW1 */
  158. u32 control_flags; /* DW2 */
  159. u32 drb_count; /* DW3 */
  160. u32 ata_cmd_feat; /* DW4 */
  161. u32 ata_addr; /* DW5 */
  162. u32 ata_addr_exp; /* DW6 */
  163. u32 ata_sect_count; /* DW7 */
  164. };
  165. #define CRQB_ALIGN 0x400
  166. #define CRQB_CNTRLFLAGS_DIR (0x01 << 0)
  167. #define CRQB_CNTRLFLAGS_DQTAGMASK (0x1f << 1)
  168. #define CRQB_CNTRLFLAGS_DQTAGSHIFT 1
  169. #define CRQB_CNTRLFLAGS_PMPORTMASK (0x0f << 12)
  170. #define CRQB_CNTRLFLAGS_PMPORTSHIFT 12
  171. #define CRQB_CNTRLFLAGS_PRDMODE (0x01 << 16)
  172. #define CRQB_CNTRLFLAGS_HQTAGMASK (0x1f << 17)
  173. #define CRQB_CNTRLFLAGS_HQTAGSHIFT 17
  174. #define CRQB_CMDFEAT_CMDMASK (0xff << 16)
  175. #define CRQB_CMDFEAT_CMDSHIFT 16
  176. #define CRQB_CMDFEAT_FEATMASK (0xff << 16)
  177. #define CRQB_CMDFEAT_FEATSHIFT 24
  178. #define CRQB_ADDR_LBA_LOWMASK (0xff << 0)
  179. #define CRQB_ADDR_LBA_LOWSHIFT 0
  180. #define CRQB_ADDR_LBA_MIDMASK (0xff << 8)
  181. #define CRQB_ADDR_LBA_MIDSHIFT 8
  182. #define CRQB_ADDR_LBA_HIGHMASK (0xff << 16)
  183. #define CRQB_ADDR_LBA_HIGHSHIFT 16
  184. #define CRQB_ADDR_DEVICE_MASK (0xff << 24)
  185. #define CRQB_ADDR_DEVICE_SHIFT 24
  186. #define CRQB_ADDR_LBA_LOW_EXP_MASK (0xff << 0)
  187. #define CRQB_ADDR_LBA_LOW_EXP_SHIFT 0
  188. #define CRQB_ADDR_LBA_MID_EXP_MASK (0xff << 8)
  189. #define CRQB_ADDR_LBA_MID_EXP_SHIFT 8
  190. #define CRQB_ADDR_LBA_HIGH_EXP_MASK (0xff << 16)
  191. #define CRQB_ADDR_LBA_HIGH_EXP_SHIFT 16
  192. #define CRQB_ADDR_FEATURE_EXP_MASK (0xff << 24)
  193. #define CRQB_ADDR_FEATURE_EXP_SHIFT 24
  194. #define CRQB_SECTCOUNT_COUNT_MASK (0xff << 0)
  195. #define CRQB_SECTCOUNT_COUNT_SHIFT 0
  196. #define CRQB_SECTCOUNT_COUNT_EXP_MASK (0xff << 8)
  197. #define CRQB_SECTCOUNT_COUNT_EXP_SHIFT 8
  198. #define MVSATA_WIN_CONTROL(w) (MVEBU_AXP_SATA_BASE + 0x30 + ((w) << 4))
  199. #define MVSATA_WIN_BASE(w) (MVEBU_AXP_SATA_BASE + 0x34 + ((w) << 4))
  200. struct eprd {
  201. u32 phyaddr_low;
  202. u32 bytecount_eot;
  203. u32 phyaddr_hi;
  204. u32 reserved;
  205. };
  206. #define EPRD_PHYADDR_MASK 0xfffffffe
  207. #define EPRD_BYTECOUNT_MASK 0x0000ffff
  208. #define EPRD_EOT (0x01 << 31)
  209. struct crpb {
  210. u32 id;
  211. u32 flags;
  212. u32 timestamp;
  213. };
  214. #define CRPB_ALIGN 0x100
  215. #define READ_CMD 0
  216. #define WRITE_CMD 1
  217. /*
  218. * Since we don't use PRDs yet max transfer size
  219. * is 64KB
  220. */
  221. #define MV_ATA_MAX_SECTORS (65535 / ATA_SECT_SIZE)
  222. /* Keep track if hw is initialized or not */
  223. static u32 hw_init;
  224. struct mv_priv {
  225. char name[12];
  226. u32 link;
  227. u32 regbase;
  228. u32 queue_depth;
  229. u16 pio;
  230. u16 mwdma;
  231. u16 udma;
  232. void *crqb_alloc;
  233. struct crqb *request;
  234. void *crpb_alloc;
  235. struct crpb *response;
  236. };
  237. static int ata_wait_register(u32 *addr, u32 mask, u32 val, u32 timeout_msec)
  238. {
  239. ulong start;
  240. start = get_timer(0);
  241. do {
  242. if ((in_le32(addr) & mask) == val)
  243. return 0;
  244. } while (get_timer(start) < timeout_msec);
  245. return -ETIMEDOUT;
  246. }
  247. /* Cut from sata_mv in linux kernel */
  248. static int mv_stop_edma_engine(int port)
  249. {
  250. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  251. int i;
  252. /* Disable eDMA. The disable bit auto clears. */
  253. out_le32(priv->regbase + EDMA_CMD, EDMA_CMD_DISEDMA);
  254. /* Wait for the chip to confirm eDMA is off. */
  255. for (i = 10000; i > 0; i--) {
  256. u32 reg = in_le32(priv->regbase + EDMA_CMD);
  257. if (!(reg & EDMA_CMD_ENEDMA)) {
  258. debug("EDMA stop on port %d succesful\n", port);
  259. return 0;
  260. }
  261. udelay(10);
  262. }
  263. debug("EDMA stop on port %d failed\n", port);
  264. return -1;
  265. }
  266. static int mv_start_edma_engine(int port)
  267. {
  268. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  269. u32 tmp;
  270. /* Check preconditions */
  271. tmp = in_le32(priv->regbase + SIR_SSTATUS);
  272. if ((tmp & SSTATUS_DET_MASK) != 0x03) {
  273. printf("Device error on port: %d\n", port);
  274. return -1;
  275. }
  276. tmp = in_le32(priv->regbase + PIO_CMD_STATUS);
  277. if (tmp & (ATA_BUSY | ATA_DRQ)) {
  278. printf("Device not ready on port: %d\n", port);
  279. return -1;
  280. }
  281. /* Clear interrupt cause */
  282. out_le32(priv->regbase + EDMA_IECR, 0x0);
  283. tmp = in_le32(SATAHC_BASE + SATAHC_ICR);
  284. tmp &= ~(port == 0 ? SATAHC_ICR_PORT0 : SATAHC_ICR_PORT1);
  285. out_le32(SATAHC_BASE + SATAHC_ICR, tmp);
  286. /* Configure edma operation */
  287. tmp = in_le32(priv->regbase + EDMA_CFG);
  288. tmp &= ~EDMA_CFG_NCQ; /* No NCQ */
  289. tmp &= ~EDMA_CFG_EQUE; /* Dont queue operations */
  290. out_le32(priv->regbase + EDMA_CFG, tmp);
  291. out_le32(priv->regbase + SIR_FIS_IRQ_CAUSE, 0x0);
  292. /* Configure fis, set all to no-wait for now */
  293. out_le32(priv->regbase + SIR_FIS_CFG, 0x0);
  294. /* Setup request queue */
  295. out_le32(priv->regbase + EDMA_RQBA_HI, 0x0);
  296. out_le32(priv->regbase + EDMA_RQIPR, priv->request);
  297. out_le32(priv->regbase + EDMA_RQOPR, 0x0);
  298. /* Setup response queue */
  299. out_le32(priv->regbase + EDMA_RSBA_HI, 0x0);
  300. out_le32(priv->regbase + EDMA_RSOPR, priv->response);
  301. out_le32(priv->regbase + EDMA_RSIPR, 0x0);
  302. /* Start edma */
  303. out_le32(priv->regbase + EDMA_CMD, EDMA_CMD_ENEDMA);
  304. return 0;
  305. }
  306. static int mv_reset_channel(int port)
  307. {
  308. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  309. /* Make sure edma is stopped */
  310. mv_stop_edma_engine(port);
  311. out_le32(priv->regbase + EDMA_CMD, EDMA_CMD_ATARST);
  312. udelay(25); /* allow reset propagation */
  313. out_le32(priv->regbase + EDMA_CMD, 0);
  314. mdelay(10);
  315. return 0;
  316. }
  317. static void mv_reset_port(int port)
  318. {
  319. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  320. mv_reset_channel(port);
  321. out_le32(priv->regbase + EDMA_CMD, 0x0);
  322. out_le32(priv->regbase + EDMA_CFG, 0x101f);
  323. out_le32(priv->regbase + EDMA_IECR, 0x0);
  324. out_le32(priv->regbase + EDMA_IEMR, 0x0);
  325. out_le32(priv->regbase + EDMA_RQBA_HI, 0x0);
  326. out_le32(priv->regbase + EDMA_RQIPR, 0x0);
  327. out_le32(priv->regbase + EDMA_RQOPR, 0x0);
  328. out_le32(priv->regbase + EDMA_RSBA_HI, 0x0);
  329. out_le32(priv->regbase + EDMA_RSIPR, 0x0);
  330. out_le32(priv->regbase + EDMA_RSOPR, 0x0);
  331. out_le32(priv->regbase + EDMA_IORTO, 0xfa);
  332. }
  333. static void mv_reset_one_hc(void)
  334. {
  335. out_le32(SATAHC_BASE + SATAHC_ICT, 0x00);
  336. out_le32(SATAHC_BASE + SATAHC_ITT, 0x00);
  337. out_le32(SATAHC_BASE + SATAHC_ICR, 0x00);
  338. }
  339. static int probe_port(int port)
  340. {
  341. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  342. int tries, tries2, set15 = 0;
  343. u32 tmp;
  344. debug("Probe port: %d\n", port);
  345. for (tries = 0; tries < 2; tries++) {
  346. /* Clear SError */
  347. out_le32(priv->regbase + SIR_SERROR, 0x0);
  348. /* trigger com-init */
  349. tmp = in_le32(priv->regbase + SIR_SCONTROL);
  350. tmp = (tmp & 0x0f0) | 0x300 | SIR_SCONTROL_DETEN;
  351. out_le32(priv->regbase + SIR_SCONTROL, tmp);
  352. mdelay(1);
  353. tmp = in_le32(priv->regbase + SIR_SCONTROL);
  354. tries2 = 5;
  355. do {
  356. tmp = (tmp & 0x0f0) | 0x300;
  357. out_le32(priv->regbase + SIR_SCONTROL, tmp);
  358. mdelay(10);
  359. tmp = in_le32(priv->regbase + SIR_SCONTROL);
  360. } while ((tmp & 0xf0f) != 0x300 && tries2--);
  361. mdelay(10);
  362. for (tries2 = 0; tries2 < 200; tries2++) {
  363. tmp = in_le32(priv->regbase + SIR_SSTATUS);
  364. if ((tmp & SSTATUS_DET_MASK) == 0x03) {
  365. debug("Found device on port\n");
  366. return 0;
  367. }
  368. mdelay(1);
  369. }
  370. if ((tmp & SSTATUS_DET_MASK) == 0) {
  371. debug("No device attached on port %d\n", port);
  372. return -ENODEV;
  373. }
  374. if (!set15) {
  375. /* Try on 1.5Gb/S */
  376. debug("Try 1.5Gb link\n");
  377. set15 = 1;
  378. out_le32(priv->regbase + SIR_SCONTROL, 0x304);
  379. tmp = in_le32(priv->regbase + SIR_ICFG);
  380. tmp &= ~SIR_CFG_GEN2EN;
  381. out_le32(priv->regbase + SIR_ICFG, tmp);
  382. mv_reset_channel(port);
  383. }
  384. }
  385. debug("Failed to probe port\n");
  386. return -1;
  387. }
  388. /* Get request queue in pointer */
  389. static int get_reqip(int port)
  390. {
  391. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  392. u32 tmp;
  393. tmp = in_le32(priv->regbase + EDMA_RQIPR) & EDMA_RQIPR_IPMASK;
  394. tmp = tmp >> EDMA_RQIPR_IPSHIFT;
  395. return tmp;
  396. }
  397. static void set_reqip(int port, int reqin)
  398. {
  399. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  400. u32 tmp;
  401. tmp = in_le32(priv->regbase + EDMA_RQIPR) & ~EDMA_RQIPR_IPMASK;
  402. tmp |= ((reqin << EDMA_RQIPR_IPSHIFT) & EDMA_RQIPR_IPMASK);
  403. out_le32(priv->regbase + EDMA_RQIPR, tmp);
  404. }
  405. /* Get next available slot, ignoring possible overwrite */
  406. static int get_next_reqip(int port)
  407. {
  408. int slot = get_reqip(port);
  409. slot = (slot + 1) % REQUEST_QUEUE_SIZE;
  410. return slot;
  411. }
  412. /* Get response queue in pointer */
  413. static int get_rspip(int port)
  414. {
  415. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  416. u32 tmp;
  417. tmp = in_le32(priv->regbase + EDMA_RSIPR) & EDMA_RSIPR_IPMASK;
  418. tmp = tmp >> EDMA_RSIPR_IPSHIFT;
  419. return tmp;
  420. }
  421. /* Get response queue out pointer */
  422. static int get_rspop(int port)
  423. {
  424. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  425. u32 tmp;
  426. tmp = in_le32(priv->regbase + EDMA_RSOPR) & EDMA_RSOPR_OPMASK;
  427. tmp = tmp >> EDMA_RSOPR_OPSHIFT;
  428. return tmp;
  429. }
  430. /* Get next response queue pointer */
  431. static int get_next_rspop(int port)
  432. {
  433. return (get_rspop(port) + 1) % RESPONSE_QUEUE_SIZE;
  434. }
  435. /* Set response queue pointer */
  436. static void set_rspop(int port, int reqin)
  437. {
  438. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  439. u32 tmp;
  440. tmp = in_le32(priv->regbase + EDMA_RSOPR) & ~EDMA_RSOPR_OPMASK;
  441. tmp |= ((reqin << EDMA_RSOPR_OPSHIFT) & EDMA_RSOPR_OPMASK);
  442. out_le32(priv->regbase + EDMA_RSOPR, tmp);
  443. }
  444. static int wait_dma_completion(int port, int index, u32 timeout_msec)
  445. {
  446. u32 tmp, res;
  447. tmp = port == 0 ? SATAHC_ICR_PORT0 : SATAHC_ICR_PORT1;
  448. res = ata_wait_register((u32 *)(SATAHC_BASE + SATAHC_ICR), tmp,
  449. tmp, timeout_msec);
  450. if (res)
  451. printf("Failed to wait for completion on port %d\n", port);
  452. return res;
  453. }
  454. static void process_responses(int port)
  455. {
  456. #ifdef DEBUG
  457. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  458. #endif
  459. u32 tmp;
  460. u32 outind = get_rspop(port);
  461. /* Ack interrupts */
  462. tmp = in_le32(SATAHC_BASE + SATAHC_ICR);
  463. if (port == 0)
  464. tmp &= ~(BIT(0) | BIT(8));
  465. else
  466. tmp &= ~(BIT(1) | BIT(9));
  467. tmp &= ~(BIT(4));
  468. out_le32(SATAHC_BASE + SATAHC_ICR, tmp);
  469. while (get_rspip(port) != outind) {
  470. #ifdef DEBUG
  471. debug("Response index %d flags %08x on port %d\n", outind,
  472. priv->response[outind].flags, port);
  473. #endif
  474. outind = get_next_rspop(port);
  475. set_rspop(port, outind);
  476. }
  477. }
  478. static int mv_ata_exec_ata_cmd(int port, struct sata_fis_h2d *cfis,
  479. u8 *buffer, u32 len, u32 iswrite)
  480. {
  481. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  482. struct crqb *req;
  483. int slot;
  484. u32 start;
  485. if (len >= 64 * 1024) {
  486. printf("We only support <64K transfers for now\n");
  487. return -1;
  488. }
  489. /* Initialize request */
  490. slot = get_reqip(port);
  491. memset(&priv->request[slot], 0, sizeof(struct crqb));
  492. req = &priv->request[slot];
  493. req->dtb_low = (u32)buffer;
  494. /* Dont use PRDs */
  495. req->control_flags = CRQB_CNTRLFLAGS_PRDMODE;
  496. req->control_flags |= iswrite ? 0 : CRQB_CNTRLFLAGS_DIR;
  497. req->control_flags |=
  498. ((cfis->pm_port_c << CRQB_CNTRLFLAGS_PMPORTSHIFT)
  499. & CRQB_CNTRLFLAGS_PMPORTMASK);
  500. req->drb_count = len;
  501. req->ata_cmd_feat = (cfis->command << CRQB_CMDFEAT_CMDSHIFT) &
  502. CRQB_CMDFEAT_CMDMASK;
  503. req->ata_cmd_feat |= (cfis->features << CRQB_CMDFEAT_FEATSHIFT) &
  504. CRQB_CMDFEAT_FEATMASK;
  505. req->ata_addr = (cfis->lba_low << CRQB_ADDR_LBA_LOWSHIFT) &
  506. CRQB_ADDR_LBA_LOWMASK;
  507. req->ata_addr |= (cfis->lba_mid << CRQB_ADDR_LBA_MIDSHIFT) &
  508. CRQB_ADDR_LBA_MIDMASK;
  509. req->ata_addr |= (cfis->lba_high << CRQB_ADDR_LBA_HIGHSHIFT) &
  510. CRQB_ADDR_LBA_HIGHMASK;
  511. req->ata_addr |= (cfis->device << CRQB_ADDR_DEVICE_SHIFT) &
  512. CRQB_ADDR_DEVICE_MASK;
  513. req->ata_addr_exp = (cfis->lba_low_exp << CRQB_ADDR_LBA_LOW_EXP_SHIFT) &
  514. CRQB_ADDR_LBA_LOW_EXP_MASK;
  515. req->ata_addr_exp |=
  516. (cfis->lba_mid_exp << CRQB_ADDR_LBA_MID_EXP_SHIFT) &
  517. CRQB_ADDR_LBA_MID_EXP_MASK;
  518. req->ata_addr_exp |=
  519. (cfis->lba_high_exp << CRQB_ADDR_LBA_HIGH_EXP_SHIFT) &
  520. CRQB_ADDR_LBA_HIGH_EXP_MASK;
  521. req->ata_addr_exp |=
  522. (cfis->features_exp << CRQB_ADDR_FEATURE_EXP_SHIFT) &
  523. CRQB_ADDR_FEATURE_EXP_MASK;
  524. req->ata_sect_count =
  525. (cfis->sector_count << CRQB_SECTCOUNT_COUNT_SHIFT) &
  526. CRQB_SECTCOUNT_COUNT_MASK;
  527. req->ata_sect_count |=
  528. (cfis->sector_count_exp << CRQB_SECTCOUNT_COUNT_EXP_SHIFT) &
  529. CRQB_SECTCOUNT_COUNT_EXP_MASK;
  530. /* Flush data */
  531. start = (u32)req & ~(ARCH_DMA_MINALIGN - 1);
  532. flush_dcache_range(start,
  533. start + ALIGN(sizeof(*req), ARCH_DMA_MINALIGN));
  534. /* Trigger operation */
  535. slot = get_next_reqip(port);
  536. set_reqip(port, slot);
  537. /* Wait for completion */
  538. if (wait_dma_completion(port, slot, 10000)) {
  539. printf("ATA operation timed out\n");
  540. return -1;
  541. }
  542. process_responses(port);
  543. /* Invalidate data on read */
  544. if (buffer && len) {
  545. start = (u32)buffer & ~(ARCH_DMA_MINALIGN - 1);
  546. invalidate_dcache_range(start,
  547. start + ALIGN(len, ARCH_DMA_MINALIGN));
  548. }
  549. return len;
  550. }
  551. static u32 mv_sata_rw_cmd_ext(int port, lbaint_t start, u32 blkcnt,
  552. u8 *buffer, int is_write)
  553. {
  554. struct sata_fis_h2d cfis;
  555. u32 res;
  556. u64 block;
  557. block = (u64)start;
  558. memset(&cfis, 0, sizeof(struct sata_fis_h2d));
  559. cfis.fis_type = SATA_FIS_TYPE_REGISTER_H2D;
  560. cfis.command = (is_write) ? ATA_CMD_WRITE_EXT : ATA_CMD_READ_EXT;
  561. cfis.lba_high_exp = (block >> 40) & 0xff;
  562. cfis.lba_mid_exp = (block >> 32) & 0xff;
  563. cfis.lba_low_exp = (block >> 24) & 0xff;
  564. cfis.lba_high = (block >> 16) & 0xff;
  565. cfis.lba_mid = (block >> 8) & 0xff;
  566. cfis.lba_low = block & 0xff;
  567. cfis.device = ATA_LBA;
  568. cfis.sector_count_exp = (blkcnt >> 8) & 0xff;
  569. cfis.sector_count = blkcnt & 0xff;
  570. res = mv_ata_exec_ata_cmd(port, &cfis, buffer, ATA_SECT_SIZE * blkcnt,
  571. is_write);
  572. return res >= 0 ? blkcnt : res;
  573. }
  574. static u32 mv_sata_rw_cmd(int port, lbaint_t start, u32 blkcnt, u8 *buffer,
  575. int is_write)
  576. {
  577. struct sata_fis_h2d cfis;
  578. lbaint_t block;
  579. u32 res;
  580. block = start;
  581. memset(&cfis, 0, sizeof(struct sata_fis_h2d));
  582. cfis.fis_type = SATA_FIS_TYPE_REGISTER_H2D;
  583. cfis.command = (is_write) ? ATA_CMD_WRITE : ATA_CMD_READ;
  584. cfis.device = ATA_LBA;
  585. cfis.device |= (block >> 24) & 0xf;
  586. cfis.lba_high = (block >> 16) & 0xff;
  587. cfis.lba_mid = (block >> 8) & 0xff;
  588. cfis.lba_low = block & 0xff;
  589. cfis.sector_count = (u8)(blkcnt & 0xff);
  590. res = mv_ata_exec_ata_cmd(port, &cfis, buffer, ATA_SECT_SIZE * blkcnt,
  591. is_write);
  592. return res >= 0 ? blkcnt : res;
  593. }
  594. static u32 ata_low_level_rw(int dev, lbaint_t blknr, lbaint_t blkcnt,
  595. void *buffer, int is_write)
  596. {
  597. lbaint_t start, blks;
  598. u8 *addr;
  599. int max_blks;
  600. debug("%s: %ld %ld\n", __func__, blknr, blkcnt);
  601. start = blknr;
  602. blks = blkcnt;
  603. addr = (u8 *)buffer;
  604. max_blks = MV_ATA_MAX_SECTORS;
  605. do {
  606. if (blks > max_blks) {
  607. if (sata_dev_desc[dev].lba48) {
  608. mv_sata_rw_cmd_ext(dev, start, max_blks, addr,
  609. is_write);
  610. } else {
  611. mv_sata_rw_cmd(dev, start, max_blks, addr,
  612. is_write);
  613. }
  614. start += max_blks;
  615. blks -= max_blks;
  616. addr += ATA_SECT_SIZE * max_blks;
  617. } else {
  618. if (sata_dev_desc[dev].lba48) {
  619. mv_sata_rw_cmd_ext(dev, start, blks, addr,
  620. is_write);
  621. } else {
  622. mv_sata_rw_cmd(dev, start, blks, addr,
  623. is_write);
  624. }
  625. start += blks;
  626. blks = 0;
  627. addr += ATA_SECT_SIZE * blks;
  628. }
  629. } while (blks != 0);
  630. return blkcnt;
  631. }
  632. static int mv_ata_exec_ata_cmd_nondma(int port,
  633. struct sata_fis_h2d *cfis, u8 *buffer,
  634. u32 len, u32 iswrite)
  635. {
  636. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  637. int i;
  638. u16 *tp;
  639. debug("%s\n", __func__);
  640. out_le32(priv->regbase + PIO_SECTOR_COUNT, cfis->sector_count);
  641. out_le32(priv->regbase + PIO_LBA_HI, cfis->lba_high);
  642. out_le32(priv->regbase + PIO_LBA_MID, cfis->lba_mid);
  643. out_le32(priv->regbase + PIO_LBA_LOW, cfis->lba_low);
  644. out_le32(priv->regbase + PIO_ERR_FEATURES, cfis->features);
  645. out_le32(priv->regbase + PIO_DEVICE, cfis->device);
  646. out_le32(priv->regbase + PIO_CMD_STATUS, cfis->command);
  647. if (ata_wait_register((u32 *)(priv->regbase + PIO_CMD_STATUS),
  648. ATA_BUSY, 0x0, 10000)) {
  649. debug("Failed to wait for completion\n");
  650. return -1;
  651. }
  652. if (len > 0) {
  653. tp = (u16 *)buffer;
  654. for (i = 0; i < len / 2; i++) {
  655. if (iswrite)
  656. out_le16(priv->regbase + PIO_DATA, *tp++);
  657. else
  658. *tp++ = in_le16(priv->regbase + PIO_DATA);
  659. }
  660. }
  661. return len;
  662. }
  663. static int mv_sata_identify(int port, u16 *id)
  664. {
  665. struct sata_fis_h2d h2d;
  666. memset(&h2d, 0, sizeof(struct sata_fis_h2d));
  667. h2d.fis_type = SATA_FIS_TYPE_REGISTER_H2D;
  668. h2d.command = ATA_CMD_ID_ATA;
  669. /* Give device time to get operational */
  670. mdelay(10);
  671. return mv_ata_exec_ata_cmd_nondma(port, &h2d, (u8 *)id,
  672. ATA_ID_WORDS * 2, READ_CMD);
  673. }
  674. static void mv_sata_xfer_mode(int port, u16 *id)
  675. {
  676. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  677. priv->pio = id[ATA_ID_PIO_MODES];
  678. priv->mwdma = id[ATA_ID_MWDMA_MODES];
  679. priv->udma = id[ATA_ID_UDMA_MODES];
  680. debug("pio %04x, mwdma %04x, udma %04x\n", priv->pio, priv->mwdma,
  681. priv->udma);
  682. }
  683. static void mv_sata_set_features(int port)
  684. {
  685. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  686. struct sata_fis_h2d cfis;
  687. u8 udma_cap;
  688. memset(&cfis, 0, sizeof(struct sata_fis_h2d));
  689. cfis.fis_type = SATA_FIS_TYPE_REGISTER_H2D;
  690. cfis.command = ATA_CMD_SET_FEATURES;
  691. cfis.features = SETFEATURES_XFER;
  692. /* First check the device capablity */
  693. udma_cap = (u8) (priv->udma & 0xff);
  694. if (udma_cap == ATA_UDMA6)
  695. cfis.sector_count = XFER_UDMA_6;
  696. if (udma_cap == ATA_UDMA5)
  697. cfis.sector_count = XFER_UDMA_5;
  698. if (udma_cap == ATA_UDMA4)
  699. cfis.sector_count = XFER_UDMA_4;
  700. if (udma_cap == ATA_UDMA3)
  701. cfis.sector_count = XFER_UDMA_3;
  702. mv_ata_exec_ata_cmd_nondma(port, &cfis, NULL, 0, READ_CMD);
  703. }
  704. int mv_sata_spin_down(int dev)
  705. {
  706. struct sata_fis_h2d cfis;
  707. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[dev].priv;
  708. if (priv->link == 0) {
  709. debug("No device on port: %d\n", dev);
  710. return 1;
  711. }
  712. memset(&cfis, 0, sizeof(struct sata_fis_h2d));
  713. cfis.fis_type = SATA_FIS_TYPE_REGISTER_H2D;
  714. cfis.command = ATA_CMD_STANDBY;
  715. return mv_ata_exec_ata_cmd_nondma(dev, &cfis, NULL, 0, READ_CMD);
  716. }
  717. int mv_sata_spin_up(int dev)
  718. {
  719. struct sata_fis_h2d cfis;
  720. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[dev].priv;
  721. if (priv->link == 0) {
  722. debug("No device on port: %d\n", dev);
  723. return 1;
  724. }
  725. memset(&cfis, 0, sizeof(struct sata_fis_h2d));
  726. cfis.fis_type = SATA_FIS_TYPE_REGISTER_H2D;
  727. cfis.command = ATA_CMD_IDLE;
  728. return mv_ata_exec_ata_cmd_nondma(dev, &cfis, NULL, 0, READ_CMD);
  729. }
  730. ulong sata_read(int dev, ulong blknr, lbaint_t blkcnt, void *buffer)
  731. {
  732. return ata_low_level_rw(dev, blknr, blkcnt, buffer, READ_CMD);
  733. }
  734. ulong sata_write(int dev, ulong blknr, lbaint_t blkcnt, const void *buffer)
  735. {
  736. return ata_low_level_rw(dev, blknr, blkcnt, (void *)buffer, WRITE_CMD);
  737. }
  738. /*
  739. * Initialize SATA memory windows
  740. */
  741. static void mvsata_ide_conf_mbus_windows(void)
  742. {
  743. const struct mbus_dram_target_info *dram;
  744. int i;
  745. dram = mvebu_mbus_dram_info();
  746. /* Disable windows, Set Size/Base to 0 */
  747. for (i = 0; i < 4; i++) {
  748. writel(0, MVSATA_WIN_CONTROL(i));
  749. writel(0, MVSATA_WIN_BASE(i));
  750. }
  751. for (i = 0; i < dram->num_cs; i++) {
  752. const struct mbus_dram_window *cs = dram->cs + i;
  753. writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
  754. (dram->mbus_dram_target_id << 4) | 1,
  755. MVSATA_WIN_CONTROL(i));
  756. writel(cs->base & 0xffff0000, MVSATA_WIN_BASE(i));
  757. }
  758. }
  759. int init_sata(int dev)
  760. {
  761. struct mv_priv *priv;
  762. debug("Initialize sata dev: %d\n", dev);
  763. if (dev < 0 || dev >= CONFIG_SYS_SATA_MAX_DEVICE) {
  764. printf("Invalid sata device %d\n", dev);
  765. return -1;
  766. }
  767. priv = (struct mv_priv *)malloc(sizeof(struct mv_priv));
  768. if (!priv) {
  769. printf("Failed to allocate memory for private sata data\n");
  770. return -ENOMEM;
  771. }
  772. memset((void *)priv, 0, sizeof(struct mv_priv));
  773. /* Allocate and align request buffer */
  774. priv->crqb_alloc = malloc(sizeof(struct crqb) * REQUEST_QUEUE_SIZE +
  775. CRQB_ALIGN);
  776. if (!priv->crqb_alloc) {
  777. printf("Unable to allocate memory for request queue\n");
  778. return -ENOMEM;
  779. }
  780. memset(priv->crqb_alloc, 0,
  781. sizeof(struct crqb) * REQUEST_QUEUE_SIZE + CRQB_ALIGN);
  782. priv->request = (struct crqb *)(((u32) priv->crqb_alloc + CRQB_ALIGN) &
  783. ~(CRQB_ALIGN - 1));
  784. /* Allocate and align response buffer */
  785. priv->crpb_alloc = malloc(sizeof(struct crpb) * REQUEST_QUEUE_SIZE +
  786. CRPB_ALIGN);
  787. if (!priv->crpb_alloc) {
  788. printf("Unable to allocate memory for response queue\n");
  789. return -ENOMEM;
  790. }
  791. memset(priv->crpb_alloc, 0,
  792. sizeof(struct crpb) * REQUEST_QUEUE_SIZE + CRPB_ALIGN);
  793. priv->response = (struct crpb *)(((u32) priv->crpb_alloc + CRPB_ALIGN) &
  794. ~(CRPB_ALIGN - 1));
  795. sata_dev_desc[dev].priv = (void *)priv;
  796. sprintf(priv->name, "SATA%d", dev);
  797. priv->regbase = dev == 0 ? SATA0_BASE : SATA1_BASE;
  798. if (!hw_init) {
  799. debug("Initialize sata hw\n");
  800. hw_init = 1;
  801. mv_reset_one_hc();
  802. mvsata_ide_conf_mbus_windows();
  803. }
  804. mv_reset_port(dev);
  805. if (probe_port(dev)) {
  806. priv->link = 0;
  807. return -ENODEV;
  808. }
  809. priv->link = 1;
  810. return 0;
  811. }
  812. int reset_sata(int dev)
  813. {
  814. return 0;
  815. }
  816. int scan_sata(int port)
  817. {
  818. unsigned char serial[ATA_ID_SERNO_LEN + 1];
  819. unsigned char firmware[ATA_ID_FW_REV_LEN + 1];
  820. unsigned char product[ATA_ID_PROD_LEN + 1];
  821. u64 n_sectors;
  822. u16 *id;
  823. struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
  824. if (!priv->link)
  825. return -ENODEV;
  826. id = (u16 *)malloc(ATA_ID_WORDS * 2);
  827. if (!id) {
  828. printf("Failed to malloc id data\n");
  829. return -ENOMEM;
  830. }
  831. mv_sata_identify(port, id);
  832. ata_swap_buf_le16(id, ATA_ID_WORDS);
  833. #ifdef DEBUG
  834. ata_dump_id(id);
  835. #endif
  836. /* Serial number */
  837. ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
  838. memcpy(sata_dev_desc[port].product, serial, sizeof(serial));
  839. /* Firmware version */
  840. ata_id_c_string(id, firmware, ATA_ID_FW_REV, sizeof(firmware));
  841. memcpy(sata_dev_desc[port].revision, firmware, sizeof(firmware));
  842. /* Product model */
  843. ata_id_c_string(id, product, ATA_ID_PROD, sizeof(product));
  844. memcpy(sata_dev_desc[port].vendor, product, sizeof(product));
  845. /* Total sectors */
  846. n_sectors = ata_id_n_sectors(id);
  847. sata_dev_desc[port].lba = n_sectors;
  848. /* Check if support LBA48 */
  849. if (ata_id_has_lba48(id)) {
  850. sata_dev_desc[port].lba48 = 1;
  851. debug("Device support LBA48\n");
  852. }
  853. /* Get the NCQ queue depth from device */
  854. priv->queue_depth = ata_id_queue_depth(id);
  855. /* Get the xfer mode from device */
  856. mv_sata_xfer_mode(port, id);
  857. /* Set the xfer mode to highest speed */
  858. mv_sata_set_features(port);
  859. /* Start up */
  860. mv_start_edma_engine(port);
  861. return 0;
  862. }