// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
 *
 * Copyright 2005  Tejun Heo
 *
 * Based on preview driver from Silicon Image.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil24"
#define DRV_VERSION	"1.1"

/*
 * Port request block (PRB) 32 bytes
 */
struct sil24_prb {
	__le16	ctrl;
	__le16	prot;
	__le32	rx_cnt;
	u8	fis[6 * 4];
};

/*
 * Scatter gather entry (SGE) 16 bytes
 */
struct sil24_sge {
	__le64	addr;
	__le32	cnt;
	__le32	flags;
};

enum {
	SIL24_HOST_BAR	= 0,
	SIL24_PORT_BAR	= 2,

	/* sil24 fetches in chunks of 64 bytes.  The first block
	 * contains the PRB and two SGEs.  Each subsequent block
	 * consists of four SGEs and is called an SGT.  Calculate the
	 * number of SGTs that fit into one page.
	 */
	SIL24_PRB_SZ	= sizeof(struct sil24_prb)
			  + 2 * sizeof(struct sil24_sge),
	SIL24_MAX_SGT	= (PAGE_SIZE - SIL24_PRB_SZ)
			  / (4 * sizeof(struct sil24_sge)),

	/* This gives one unused SGE for ATA commands.  The extra SGE
	 * is used to store the CDB for ATAPI devices.
	 */
	SIL24_MAX_SGE	= 4 * SIL24_MAX_SGT + 1,

	/*
	 * Global controller registers (128 bytes @ BAR0)
	 */
	/* 32 bit regs */
	HOST_SLOT_STAT		= 0x00, /* 32 bit slot stat * 4 */
	HOST_CTRL		= 0x40,
	HOST_IRQ_STAT		= 0x44,
	HOST_PHY_CFG		= 0x48,
	HOST_BIST_CTRL		= 0x50,
	HOST_BIST_PTRN		= 0x54,
	HOST_BIST_STAT		= 0x58,
	HOST_MEM_BIST_STAT	= 0x5c,
	HOST_FLASH_CMD		= 0x70,
	/* 8 bit regs */
	HOST_FLASH_DATA		= 0x74,
	HOST_TRANSITION_DETECT	= 0x75,
	HOST_GPIO_CTRL		= 0x76,
	HOST_I2C_ADDR		= 0x78, /* 32 bit */
	HOST_I2C_DATA		= 0x7c,
	HOST_I2C_XFER_CNT	= 0x7e,
	HOST_I2C_CTRL		= 0x7f,

	/* HOST_SLOT_STAT bits */
	HOST_SSTAT_ATTN		= (1 << 31),

	/* HOST_CTRL bits */
	HOST_CTRL_M66EN		= (1 << 16), /* M66EN PCI bus signal */
	HOST_CTRL_TRDY		= (1 << 17), /* latched PCI TRDY */
	HOST_CTRL_STOP		= (1 << 18), /* latched PCI STOP */
	HOST_CTRL_DEVSEL	= (1 << 19), /* latched PCI DEVSEL */
	HOST_CTRL_REQ64		= (1 << 20), /* latched PCI REQ64 */
	HOST_CTRL_GLOBAL_RST	= (1 << 31), /* global reset */

	/*
	 * Port registers
	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
	 */
	PORT_REGS_SIZE		= 0x2000,

	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PMP regs */
	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */

	PORT_PMP		= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
	PORT_PMP_STATUS		= 0x0000, /* port device status offset */
	PORT_PMP_QACTIVE	= 0x0004, /* port device QActive offset */
	PORT_PMP_SIZE		= 0x0008, /* 8 bytes per PMP */

	/* 32 bit regs */
	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
	PORT_IRQ_STAT		= 0x1008, /* high: status, low: interrupt */
	PORT_IRQ_ENABLE_SET	= 0x1010, /* write: enable-set */
	PORT_IRQ_ENABLE_CLR	= 0x1014, /* write: enable-clear */
	PORT_ACTIVATE_UPPER_ADDR = 0x101c,
	PORT_EXEC_FIFO		= 0x1020, /* command execution fifo */
	PORT_CMD_ERR		= 0x1024, /* command error number */
	PORT_FIS_CFG		= 0x1028,
	PORT_FIFO_THRES		= 0x102c,
	/* 16 bit regs */
	PORT_DECODE_ERR_CNT	= 0x1040,
	PORT_DECODE_ERR_THRESH	= 0x1042,
	PORT_CRC_ERR_CNT	= 0x1044,
	PORT_CRC_ERR_THRESH	= 0x1046,
	PORT_HSHK_ERR_CNT	= 0x1048,
	PORT_HSHK_ERR_THRESH	= 0x104a,
	/* 32 bit regs */
	PORT_PHY_CFG		= 0x1050,
	PORT_SLOT_STAT		= 0x1800,
	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
	PORT_CONTEXT		= 0x1e04,
	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
	PORT_SCONTROL		= 0x1f00,
	PORT_SSTATUS		= 0x1f04,
	PORT_SERROR		= 0x1f08,
	PORT_SACTIVE		= 0x1f0c,

	/* PORT_CTRL_STAT bits */
	PORT_CS_PORT_RST	= (1 << 0), /* port reset */
	PORT_CS_DEV_RST		= (1 << 1), /* device reset */
	PORT_CS_INIT		= (1 << 2), /* port initialize */
	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
	PORT_CS_PMP_RESUME	= (1 << 6), /* PMP resume */
	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
	PORT_CS_PMP_EN		= (1 << 13), /* port multiplier enable */
	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */

	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
	/* bits[11:0] are masked */
	PORT_IRQ_COMPLETE	= (1 << 0), /* command(s) completed */
	PORT_IRQ_ERROR		= (1 << 1), /* command execution error */
	PORT_IRQ_PORTRDY_CHG	= (1 << 2), /* port ready change */
	PORT_IRQ_PWR_CHG	= (1 << 3), /* power management change */
	PORT_IRQ_PHYRDY_CHG	= (1 << 4), /* PHY ready change */
	PORT_IRQ_COMWAKE	= (1 << 5), /* COMWAKE received */
	PORT_IRQ_UNK_FIS	= (1 << 6), /* unknown FIS received */
	PORT_IRQ_DEV_XCHG	= (1 << 7), /* device exchanged */
	PORT_IRQ_8B10B		= (1 << 8), /* 8b/10b decode error threshold */
	PORT_IRQ_CRC		= (1 << 9), /* CRC error threshold */
	PORT_IRQ_HANDSHAKE	= (1 << 10), /* handshake error threshold */
	PORT_IRQ_SDB_NOTIFY	= (1 << 11), /* SDB notify received */

	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
				  PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,

	/* bits[27:16] are unmasked (raw) */
	PORT_IRQ_RAW_SHIFT	= 16,
	PORT_IRQ_MASKED_MASK	= 0x7ff,
	PORT_IRQ_RAW_MASK	= (0x7ff << PORT_IRQ_RAW_SHIFT),

	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
	PORT_IRQ_STEER_SHIFT	= 30,
	PORT_IRQ_STEER_MASK	= (3 << PORT_IRQ_STEER_SHIFT),

	/* PORT_CMD_ERR constants */
	PORT_CERR_DEV		= 1, /* Error bit in D2H Register FIS */
	PORT_CERR_SDB		= 2, /* Error bit in SDB FIS */
	PORT_CERR_DATA		= 3, /* Error in data FIS not detected by dev */
	PORT_CERR_SEND		= 4, /* Initial cmd FIS transmission failure */
	PORT_CERR_INCONSISTENT	= 5, /* Protocol mismatch */
	PORT_CERR_DIRECTION	= 6, /* Data direction mismatch */
	PORT_CERR_UNDERRUN	= 7, /* Ran out of SGEs while writing */
	PORT_CERR_OVERRUN	= 8, /* Ran out of SGEs while reading */
	PORT_CERR_PKT_PROT	= 11, /* DIR invalid in 1st PIO setup of ATAPI */
	PORT_CERR_SGT_BOUNDARY	= 16, /* PLD ecode 00 - SGT not on qword boundary */
	PORT_CERR_SGT_TGTABRT	= 17, /* PLD ecode 01 - target abort */
	PORT_CERR_SGT_MSTABRT	= 18, /* PLD ecode 10 - master abort */
	PORT_CERR_SGT_PCIPERR	= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
	PORT_CERR_CMD_BOUNDARY	= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
	PORT_CERR_CMD_TGTABRT	= 25, /* ctrl[15:13] 010 - target abort */
	PORT_CERR_CMD_MSTABRT	= 26, /* ctrl[15:13] 100 - master abort */
	PORT_CERR_CMD_PCIPERR	= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
	PORT_CERR_XFR_UNDEF	= 32, /* PSD ecode 00 - undefined */
	PORT_CERR_XFR_TGTABRT	= 33, /* PSD ecode 01 - target abort */
	PORT_CERR_XFR_MSTABRT	= 34, /* PSD ecode 10 - master abort */
	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
	PORT_CERR_SENDSERVICE	= 36, /* FIS received while sending service */

	/* bits of PRB control field */
	PRB_CTRL_PROTOCOL	= (1 << 0), /* override def. ATA protocol */
	PRB_CTRL_PACKET_READ	= (1 << 4), /* PACKET cmd read */
	PRB_CTRL_PACKET_WRITE	= (1 << 5), /* PACKET cmd write */
	PRB_CTRL_NIEN		= (1 << 6), /* Mask completion irq */
	PRB_CTRL_SRST		= (1 << 7), /* Soft reset request (ign BSY?) */

	/* PRB protocol field */
	PRB_PROT_PACKET		= (1 << 0),
	PRB_PROT_TCQ		= (1 << 1),
	PRB_PROT_NCQ		= (1 << 2),
	PRB_PROT_READ		= (1 << 3),
	PRB_PROT_WRITE		= (1 << 4),
	PRB_PROT_TRANSPARENT	= (1 << 5),

	/*
	 * Other constants
	 */
	SGE_TRM			= (1 << 31), /* Last SGE in chain */
	SGE_LNK			= (1 << 30), /* linked list
						Points to SGT, not SGE */
	SGE_DRD			= (1 << 29), /* discard data read (/dev/null)
						data address ignored */

	SIL24_MAX_CMDS		= 31,

	/* board id */
	BID_SIL3124		= 0,
	BID_SIL3132		= 1,
	BID_SIL3131		= 2,

	/* host flags */
	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_AN | ATA_FLAG_PMP,
	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */

	IRQ_STAT_4PORTS		= 0xf,
};
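
/*
 * Command blocks.  Each hardware slot gets one block: a PRB followed by
 * the SGE table; for ATAPI a 16-byte CDB sits between the PRB and the
 * SGEs.  The union must be exactly PAGE_SIZE, which is enforced by the
 * size check in sil24_init_one().
 */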
struct sil24_ata_block {
	struct sil24_prb prb;
	struct sil24_sge sge[SIL24_MAX_SGE];
};

struct sil24_atapi_block {
	struct sil24_prb prb;
	u8 cdb[16];
	struct sil24_sge sge[SIL24_MAX_SGE];
};

union sil24_cmd_block {
	struct sil24_ata_block ata;
	struct sil24_atapi_block atapi;
};
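
/*
 * Translation table from PORT_CMD_ERR values to libata error mask, EH
 * action and description.  Values not covered here are treated as
 * AC_ERR_OTHER with a reset in sil24_error_intr().
 */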
static const struct sil24_cerr_info {
	unsigned int err_mask, action;
	const char *desc;
} sil24_cerr_db[] = {
	[0]			= { AC_ERR_DEV, 0,
				    "device error" },
	[PORT_CERR_DEV]		= { AC_ERR_DEV, 0,
				    "device error via D2H FIS" },
	[PORT_CERR_SDB]		= { AC_ERR_DEV, 0,
				    "device error via SDB FIS" },
	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "error in data FIS" },
	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "failed to transmit command FIS" },
	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
				     "protocol mismatch" },
	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "data direction mismatch" },
	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while writing" },
	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while reading" },
	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "invalid data direction for ATAPI CDB" },
	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				     "SGT not on qword boundary" },
	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while fetching SGT" },
	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while fetching SGT" },
	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while fetching SGT" },
	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				     "PRB not on qword boundary" },
	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while fetching PRB" },
	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while fetching PRB" },
	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while fetching PRB" },
	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "undefined error while transferring data" },
	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while transferring data" },
	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while transferring data" },
	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while transferring data" },
	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "FIS received while sending service FIS" },
};

/*
 * ap->private_data
 *
 * The preview driver always returned 0 for status.  We emulate it
 * here from the previous interrupt.
 */
struct sil24_port_priv {
	union sil24_cmd_block *cmd_block;	/* SIL24_MAX_CMDS cmd blocks */
	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
	int do_port_rst;
};

static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev);
#endif
#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap);
#endif

static const struct pci_device_id sil24_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
	{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },

	{ } /* terminate list */
};

static struct pci_driver sil24_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sil24_pci_tbl,
	.probe		= sil24_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= sil24_pci_device_resume,
#endif
};

static const struct scsi_host_template sil24_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= SIL24_MAX_CMDS,
	.sg_tablesize		= SIL24_MAX_SGE,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.tag_alloc_policy	= BLK_TAG_ALLOC_FIFO,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.device_configure	= ata_scsi_device_configure
};

static struct ata_port_operations sil24_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= sil24_qc_defer,
	.qc_prep		= sil24_qc_prep,
	.qc_issue		= sil24_qc_issue,
	.qc_fill_rtf		= sil24_qc_fill_rtf,

	.freeze			= sil24_freeze,
	.thaw			= sil24_thaw,
	.softreset		= sil24_softreset,
	.hardreset		= sil24_hardreset,
	.pmp_softreset		= sil24_softreset,
	.pmp_hardreset		= sil24_pmp_hardreset,
	.error_handler		= sil24_error_handler,
	.post_internal_cmd	= sil24_post_internal_cmd,
	.dev_config		= sil24_dev_config,

	.scr_read		= sil24_scr_read,
	.scr_write		= sil24_scr_write,
	.pmp_attach		= sil24_pmp_attach,
	.pmp_detach		= sil24_pmp_detach,

	.port_start		= sil24_port_start,
#ifdef CONFIG_PM
	.port_resume		= sil24_port_resume,
#endif
};

static bool sata_sil24_msi;	/* enable MSI (disabled by default) */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");

/*
 * Use bits 30-31 of port_flags to encode available port numbers.
 * The current maximum is 4.
 */
#define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)

static const struct ata_port_info sil24_port_info[] = {
	/* sil_3124 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
				  SIL24_FLAG_PCIX_IRQ_WOC,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3132 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3131/sil_3531 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
};
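
/*
 * The controller has SIL24_MAX_CMDS (31) hardware slots; libata's
 * internal tag is mapped onto slot 0.
 */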
static int sil24_tag(int tag)
{
	if (unlikely(ata_tag_internal(tag)))
		return 0;
	return tag;
}

static unsigned long sil24_port_offset(struct ata_port *ap)
{
	return ap->port_no * PORT_REGS_SIZE;
}

static void __iomem *sil24_port_base(struct ata_port *ap)
{
	return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
}

static void sil24_dev_config(struct ata_device *dev)
{
	void __iomem *port = sil24_port_base(dev->link->ap);

	if (dev->cdb_len == 16)
		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
}
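
/* Read the FIS for @tag from the port's LRAM slot into @tf. */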
static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_prb __iomem *prb;
	u8 fis[6 * 4];

	prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
	memcpy_fromio(fis, prb->fis, sizeof(fis));
	ata_tf_from_fis(fis, tf);
}

static int sil24_scr_map[] = {
	[SCR_CONTROL]	= 0,
	[SCR_STATUS]	= 1,
	[SCR_ERROR]	= 2,
	[SCR_ACTIVE]	= 3,
};

static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static void sil24_config_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* configure IRQ WoC */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

	/* zero error counters. */
	writew(0x8000, port + PORT_DECODE_ERR_THRESH);
	writew(0x8000, port + PORT_CRC_ERR_THRESH);
	writew(0x8000, port + PORT_HSHK_ERR_THRESH);
	writew(0x0000, port + PORT_DECODE_ERR_CNT);
	writew(0x0000, port + PORT_CRC_ERR_CNT);
	writew(0x0000, port + PORT_HSHK_ERR_CNT);

	/* always use 64bit activation */
	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

	/* clear port multiplier enable and resume bits */
	writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
}

static void sil24_config_pmp(struct ata_port *ap, int attached)
{
	void __iomem *port = sil24_port_base(ap);

	if (attached)
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
}

static void sil24_clear_pmp(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	int i;

	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);

	for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
		void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;

		writel(0, pmp_base + PORT_PMP_STATUS);
		writel(0, pmp_base + PORT_PMP_QACTIVE);
	}
}

static int sil24_init_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	u32 tmp;

	/* clear PMP error status */
	if (sata_pmp_attached(ap))
		sil24_clear_pmp(ap);

	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
	ata_wait_register(ap, port + PORT_CTRL_STAT,
			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_RDY, 0, 10, 100);

	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
		pp->do_port_rst = 1;
		ap->link.eh_context.i.action |= ATA_EH_RESET;
		return -EIO;
	}

	return 0;
}
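
/*
 * Execute a single PRB from slot 0 and poll PORT_IRQ_STAT for the
 * result, with completion/error interrupts temporarily disabled.
 * Used by sil24_softreset() to issue the SRST request.
 */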
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
				 const struct ata_taskfile *tf,
				 int is_cmd, u32 ctrl,
				 unsigned int timeout_msec)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
	dma_addr_t paddr = pp->cmd_block_dma;
	u32 irq_enabled, irq_mask, irq_stat;
	int rc;

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);

	/* temporarily plug completion and error interrupts */
	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);

	irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
	irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
				     10, timeout_msec);

	writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
	irq_stat >>= PORT_IRQ_RAW_SHIFT;

	if (irq_stat & PORT_IRQ_COMPLETE)
		rc = 0;
	else {
		/* force port into known state */
		sil24_init_port(ap);

		if (irq_stat & PORT_IRQ_ERROR)
			rc = -EIO;
		else
			rc = -EBUSY;
	}

	/* restore IRQ enabled */
	writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);

	return rc;
}

static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int pmp = sata_srst_pmp(link);
	unsigned int timeout_msec = 0;
	struct ata_taskfile tf;
	const char *reason;
	int rc;

	/* put the port into known state */
	if (sil24_init_port(ap)) {
		reason = "port not ready";
		goto err;
	}

	/* do SRST */
	if (time_after(deadline, jiffies))
		timeout_msec = jiffies_to_msecs(deadline - jiffies);

	ata_tf_init(link->device, &tf);	/* doesn't really matter */
	rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
				   timeout_msec);
	if (rc == -EBUSY) {
		reason = "timeout";
		goto err;
	} else if (rc) {
		reason = "SRST command error";
		goto err;
	}

	sil24_read_tf(ap, 0, &tf);
	*class = ata_port_classify(ap, &tf);

	return 0;

 err:
	ata_link_err(link, "softreset failed (%s)\n", reason);
	return -EIO;
}

static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	int did_port_rst = 0;
	const char *reason;
	int tout_msec, rc;
	u32 tmp;

 retry:
	/* Sometimes, DEV_RST is not enough to recover the controller.
	 * This often happens after the PMP DMA CS errata has been hit.
	 */
	if (pp->do_port_rst) {
		ata_port_warn(ap,
			      "controller in dubious state, performing PORT_RST\n");

		writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
		ata_msleep(ap, 10);
		writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
		ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
				  10, 5000);

		/* restore port configuration */
		sil24_config_port(ap);
		sil24_config_pmp(ap, ap->nr_pmp_links);

		pp->do_port_rst = 0;
		did_port_rst = 1;
	}

	/* sil24 does the right thing(tm) without any protection */
	sata_set_spd(link);

	tout_msec = 100;
	if (ata_link_online(link))
		tout_msec = 5000;

	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
				tout_msec);

	/* SStatus oscillates between zero and valid status after
	 * DEV_RST, debounce it.
	 */
	rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
	if (rc) {
		reason = "PHY debouncing failed";
		goto err;
	}

	if (tmp & PORT_CS_DEV_RST) {
		if (ata_link_offline(link))
			return 0;
		reason = "link not ready";
		goto err;
	}

	/* Sil24 doesn't store signature FIS after hardreset, so we
	 * can't wait for BSY to clear.  Some devices take a long time
	 * to get ready and those devices will choke if we don't wait
	 * for BSY clearance here.  Tell libata to perform follow-up
	 * softreset.
	 */
	return -EAGAIN;

 err:
	if (!did_port_rst) {
		pp->do_port_rst = 1;
		goto retry;
	}

	ata_link_err(link, "hardreset failed (%s)\n", reason);
	return -EIO;
}
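
/*
 * Build the SGE chain from the command's scatterlist and mark the last
 * entry with SGE_TRM.
 */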
static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
				 struct sil24_sge *sge)
{
	struct scatterlist *sg;
	struct sil24_sge *last_sge = NULL;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->cnt = cpu_to_le32(sg_dma_len(sg));
		sge->flags = 0;

		last_sge = sge;
		sge++;
	}

	last_sge->flags = cpu_to_le32(SGE_TRM);
}

static int sil24_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	u8 prot = qc->tf.protocol;

	/*
	 * There is a bug in the chip:
	 * Port LRAM Causes the PRB/SGT Data to be Corrupted
	 * If the host issues a read request for LRAM and SActive registers
	 * while active commands are available in the port, PRB/SGT data in
	 * the LRAM can become corrupted. This issue applies only when
	 * reading from, but not writing to, the LRAM.
	 *
	 * Therefore, reading LRAM when there is no particular error [and
	 * other commands may be outstanding] is prohibited.
	 *
	 * To avoid this bug there are two situations where a command must run
	 * exclusive of any other commands on the port:
	 *
	 * - ATAPI commands which check the sense data
	 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
	 *   set.
	 */
	int is_excl = (ata_is_atapi(prot) ||
		       (qc->flags & ATA_QCFLAG_RESULT_TF));

	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
		} else
			return ATA_DEFER_PORT;
	} else if (unlikely(is_excl)) {
		ap->excl_link = link;
		if (ap->nr_active_links)
			return ATA_DEFER_PORT;
		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
	}

	return ata_std_qc_defer(qc);
}
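
/*
 * Fill the command block for @qc: set the PRB control/protocol fields,
 * copy the CDB for ATAPI, encode the taskfile as an H2D FIS and, for
 * DMA-mapped commands, build the SGE table.
 */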
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	union sil24_cmd_block *cb;
	struct sil24_prb *prb;
	struct sil24_sge *sge;
	u16 ctrl = 0;

	cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];

	if (!ata_is_atapi(qc->tf.protocol)) {
		prb = &cb->ata.prb;
		sge = cb->ata.sge;
		if (ata_is_data(qc->tf.protocol)) {
			u16 prot = 0;
			ctrl = PRB_CTRL_PROTOCOL;
			if (ata_is_ncq(qc->tf.protocol))
				prot |= PRB_PROT_NCQ;
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				prot |= PRB_PROT_WRITE;
			else
				prot |= PRB_PROT_READ;
			prb->prot = cpu_to_le16(prot);
		}
	} else {
		prb = &cb->atapi.prb;
		sge = cb->atapi.sge;
		memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);

		if (ata_is_data(qc->tf.protocol)) {
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				ctrl = PRB_CTRL_PACKET_WRITE;
			else
				ctrl = PRB_CTRL_PACKET_READ;
		}
	}

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);

	if (qc->flags & ATA_QCFLAG_DMAMAP)
		sil24_fill_sg(qc, sge);

	return AC_ERR_OK;
}
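
/*
 * Issue a prepared command by writing the 64-bit bus address of its
 * command block to the tag's PORT_CMD_ACTIVATE slot, low dword first.
 */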
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	void __iomem *port = sil24_port_base(ap);
	unsigned int tag = sil24_tag(qc->hw_tag);
	dma_addr_t paddr;
	void __iomem *activate;

	paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
	activate = port + PORT_CMD_ACTIVATE + tag * 8;

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, activate);
	writel((u64)paddr >> 32, activate + 4);

	return 0;
}

static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
}

static void sil24_pmp_attach(struct ata_port *ap)
{
	u32 *gscr = ap->link.device->gscr;

	sil24_config_pmp(ap, 1);
	sil24_init_port(ap);

	if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
	    sata_pmp_gscr_devid(gscr) == 0x4140) {
		ata_port_info(ap,
			"disabling NCQ support due to sil24-mv4140 quirk\n");
		ap->flags &= ~ATA_FLAG_NCQ;
	}
}

static void sil24_pmp_detach(struct ata_port *ap)
{
	sil24_init_port(ap);
	sil24_config_pmp(ap, 0);

	ap->flags |= ATA_FLAG_NCQ;
}

static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline)
{
	int rc;

	rc = sil24_init_port(link->ap);
	if (rc) {
		ata_link_err(link, "hardreset failed (port not ready)\n");
		return rc;
	}

	return sata_std_hardreset(link, class, deadline);
}

static void sil24_freeze(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
	 * PORT_IRQ_ENABLE instead.
	 */
	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}

static void sil24_thaw(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 tmp;

	/* clear IRQ */
	tmp = readl(port + PORT_IRQ_STAT);
	writel(tmp, port + PORT_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
}
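
/*
 * Handle an error interrupt: record port events (SDB notify, hotplug,
 * unknown FIS), locate the offending link via PORT_CONTEXT when a PMP
 * is attached, map PORT_CMD_ERR through sil24_cerr_db, then freeze or
 * abort as appropriate.
 */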
static void sil24_error_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc = NULL;
	struct ata_link *link;
	struct ata_eh_info *ehi;
	int abort = 0, freeze = 0;
	u32 irq_stat;

	/* on error, we need to clear IRQ explicitly */
	irq_stat = readl(port + PORT_IRQ_STAT);
	writel(irq_stat, port + PORT_IRQ_STAT);

	/* first, analyze and record host port events */
	link = &ap->link;
	ehi = &link->eh_info;
	ata_ehi_clear_desc(ehi);

	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
		ata_ehi_push_desc(ehi, "SDB notify");
		sata_async_notification(ap);
	}

	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, "%s",
				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
				  "PHY RDY changed" : "device exchanged");
		freeze = 1;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "unknown FIS");
		freeze = 1;
	}

	/* deal with command error */
	if (irq_stat & PORT_IRQ_ERROR) {
		const struct sil24_cerr_info *ci = NULL;
		unsigned int err_mask = 0, action = 0;
		u32 context, cerr;
		int pmp;

		abort = 1;

		/* DMA Context Switch Failure in Port Multiplier Mode
		 * errata.  If we have active commands to 3 or more
		 * devices, any error condition on active devices can
		 * corrupt DMA context switching.
		 */
		if (ap->nr_active_links >= 3) {
			ehi->err_mask |= AC_ERR_OTHER;
			ehi->action |= ATA_EH_RESET;
			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
			pp->do_port_rst = 1;
			freeze = 1;
		}

		/* find out the offending link and qc */
		if (sata_pmp_attached(ap)) {
			context = readl(port + PORT_CONTEXT);
			pmp = (context >> 5) & 0xf;

			if (pmp < ap->nr_pmp_links) {
				link = &ap->pmp_link[pmp];
				ehi = &link->eh_info;
				qc = ata_qc_from_tag(ap, link->active_tag);

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
						  irq_stat);
			} else {
				err_mask |= AC_ERR_HSM;
				action |= ATA_EH_RESET;
				freeze = 1;
			}
		} else
			qc = ata_qc_from_tag(ap, link->active_tag);

		/* analyze CMD_ERR */
		cerr = readl(port + PORT_CMD_ERR);
		if (cerr < ARRAY_SIZE(sil24_cerr_db))
			ci = &sil24_cerr_db[cerr];

		if (ci && ci->desc) {
			err_mask |= ci->err_mask;
			action |= ci->action;
			if (action & ATA_EH_RESET)
				freeze = 1;
			ata_ehi_push_desc(ehi, "%s", ci->desc);
		} else {
			err_mask |= AC_ERR_OTHER;
			action |= ATA_EH_RESET;
			freeze = 1;
			ata_ehi_push_desc(ehi, "unknown command error %d",
					  cerr);
		}

		/* record error info */
		if (qc)
			qc->err_mask |= err_mask;
		else
			ehi->err_mask |= err_mask;

		ehi->action |= action;

		/* if PMP, resume */
		if (sata_pmp_attached(ap))
			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
	}

	/* freeze or abort */
	if (freeze)
		ata_port_freeze(ap);
	else if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static inline void sil24_host_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 slot_stat, qc_active;
	int rc;

	/* If PCIX_IRQ_WOC, there's an inherent race window between
	 * clearing IRQ pending status and reading PORT_SLOT_STAT
	 * which may cause spurious interrupts afterwards.  This is
	 * unavoidable and much better than losing interrupts which
	 * happens if IRQ pending is cleared after reading
	 * PORT_SLOT_STAT.
	 */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);

	slot_stat = readl(port + PORT_SLOT_STAT);

	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
		sil24_error_intr(ap);
		return;
	}

	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
	rc = ata_qc_complete_multiple(ap, qc_active);
	if (rc > 0)
		return;
	if (rc < 0) {
		struct ata_eh_info *ehi = &ap->link.eh_info;

		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	/* spurious interrupts are expected if PCIX_IRQ_WOC */
	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
		ata_port_info(ap,
			"spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
			slot_stat, ap->link.active_tag, ap->link.sactive);
}
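
/*
 * Top-level interrupt handler.  HOST_IRQ_STAT carries one bit per port;
 * an all-ones read is taken as a PCI fault or surprise removal.
 */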
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	unsigned handled = 0;
	u32 status;
	int i;

	status = readl(host_base + HOST_IRQ_STAT);

	if (status == 0xffffffff) {
		dev_err(host->dev, "IRQ status == 0xffffffff, "
			"PCI fault or device removal?\n");
		goto out;
	}

	if (!(status & IRQ_STAT_4PORTS))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++)
		if (status & (1 << i)) {
			sil24_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static void sil24_error_handler(struct ata_port *ap)
{
	struct sil24_port_priv *pp = ap->private_data;

	if (sil24_init_port(ap))
		ata_eh_freeze_port(ap);

	sata_pmp_error_handler(ap);

	pp->do_port_rst = 0;
}

static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if ((qc->flags & ATA_QCFLAG_EH) && sil24_init_port(ap))
		ata_eh_freeze_port(ap);
}
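
/*
 * Per-port initialization: allocate port private data and a
 * DMA-coherent array of SIL24_MAX_CMDS command blocks.
 */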
static int sil24_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct sil24_port_priv *pp;
	union sil24_cmd_block *cb;
	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
	dma_addr_t cb_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	pp->cmd_block = cb;
	pp->cmd_block_dma = cb_dma;

	ap->private_data = pp;

	ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
	ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");

	return 0;
}
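
/*
 * Global controller initialization: turn GPIO off, mask interrupts,
 * take each port out of reset and apply the default port
 * configuration, then enable interrupt reporting for all ports.
 */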
static void sil24_init_controller(struct ata_host *host)
{
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	u32 tmp;
	int i;

	/* GPIO off */
	writel(0, host_base + HOST_FLASH_CMD);

	/* clear global reset & mask interrupts during initialization */
	writel(0, host_base + HOST_CTRL);

	/* init ports */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *port = sil24_port_base(ap);

		/* Initial PHY setting */
		writel(0x20c, port + PORT_PHY_CFG);

		/* Clear port RST */
		tmp = readl(port + PORT_CTRL_STAT);
		if (tmp & PORT_CS_PORT_RST) {
			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
			tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
						PORT_CS_PORT_RST,
						PORT_CS_PORT_RST, 10, 100);
			if (tmp & PORT_CS_PORT_RST)
				dev_err(host->dev,
					"failed to clear port RST\n");
		}

		/* configure port */
		sil24_config_port(ap);
	}

	/* Turn on interrupts */
	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
}

static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
	struct ata_port_info pi = sil24_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	void __iomem * const *iomap;
	struct ata_host *host;
	int rc;
	u32 tmp;

	/* cause link error if sil24_cmd_block is sized wrongly */
	if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
		__MARKER__sil24_cmd_block_is_sized_wrongly = 1;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev,
				(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
				DRV_NAME);
	if (rc)
		return rc;
	iomap = pcim_iomap_table(pdev);

	/* apply workaround for completion IRQ loss on PCI-X errata */
	if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
		tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
			dev_info(&pdev->dev,
				 "Applying completion IRQ loss on PCI-X errata fix\n");
		else
			pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
	}

	/* allocate and fill host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi,
				    SIL24_FLAG2NPORTS(ppi[0]->flags));
	if (!host)
		return -ENOMEM;
	host->iomap = iomap;

	/* configure and activate the device */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	/* Set max read request size to 4096.  This slightly increases
	 * write throughput for pci-e variants.
	 */
	pcie_set_readrq(pdev, 4096);

	sil24_init_controller(host);

	if (sata_sil24_msi && !pci_enable_msi(pdev)) {
		dev_info(&pdev->dev, "Using MSI\n");
		pci_intx(pdev, 0);
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
				 &sil24_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);

	sil24_init_controller(host);

	ata_host_resume(host);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap)
{
	sil24_config_pmp(ap, ap->nr_pmp_links);
	return 0;
}
#endif

module_pci_driver(sil24_pci_driver);

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);