sh_mmcif.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * MMCIF eMMC driver.
  4. *
  5. * Copyright (C) 2010 Renesas Solutions Corp.
  6. * Yusuke Goda <yusuke.goda.sx@renesas.com>
  7. */
  8. /*
  9. * The MMCIF driver is now processing MMC requests asynchronously, according
  10. * to the Linux MMC API requirement.
  11. *
  12. * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
  13. * data, and optional stop. To achieve asynchronous processing each of these
  14. * stages is split into two halves: a top and a bottom half. The top half
  15. * initialises the hardware, installs a timeout handler to handle completion
  16. * timeouts, and returns. In case of the command stage this immediately returns
  17. * control to the caller, leaving all further processing to run asynchronously.
  18. * All further request processing is performed by the bottom halves.
  19. *
  20. * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
  21. * thread, a DMA completion callback, if DMA is used, a timeout work, and
  22. * request- and stage-specific handler methods.
  23. *
  24. * Each bottom half run begins with either a hardware interrupt, a DMA callback
  25. * invocation, or a timeout work run. In case of an error or a successful
  26. * processing completion, the MMC core is informed and the request processing is
  27. * finished. In case processing has to continue, i.e., if data has to be read
  28. * from or written to the card, or if a stop command has to be sent, the next
  29. * top half is called, which performs the necessary hardware handling and
  30. * reschedules the timeout work. This returns the driver state machine into the
  31. * bottom half waiting state.
  32. */
  33. #include <linux/bitops.h>
  34. #include <linux/clk.h>
  35. #include <linux/completion.h>
  36. #include <linux/delay.h>
  37. #include <linux/dma-mapping.h>
  38. #include <linux/dmaengine.h>
  39. #include <linux/mmc/card.h>
  40. #include <linux/mmc/core.h>
  41. #include <linux/mmc/host.h>
  42. #include <linux/mmc/mmc.h>
  43. #include <linux/mmc/sdio.h>
  44. #include <linux/mmc/slot-gpio.h>
  45. #include <linux/mod_devicetable.h>
  46. #include <linux/mutex.h>
  47. #include <linux/pagemap.h>
  48. #include <linux/platform_data/sh_mmcif.h>
  49. #include <linux/platform_device.h>
  50. #include <linux/pm_qos.h>
  51. #include <linux/pm_runtime.h>
  52. #include <linux/sh_dma.h>
  53. #include <linux/spinlock.h>
  54. #include <linux/module.h>
  55. #define DRIVER_NAME "sh_mmcif"
  56. /* CE_CMD_SET */
  57. #define CMD_MASK 0x3f000000
  58. #define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
  59. #define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
  60. #define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
  61. #define CMD_SET_RBSY (1 << 21) /* R1b */
  62. #define CMD_SET_CCSEN (1 << 20)
  63. #define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
  64. #define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
  65. #define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
  66. #define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
  67. #define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
  68. #define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
  69. #define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
  70. #define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/
  71. #define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/
  72. #define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/
  73. #define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/
  74. #define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
  75. #define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */
  76. #define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
  77. #define CMD_SET_CCSH (1 << 5)
  78. #define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
  79. #define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
  80. #define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
  81. #define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
  82. /* CE_CMD_CTRL */
  83. #define CMD_CTRL_BREAK (1 << 0)
  84. /* CE_BLOCK_SET */
  85. #define BLOCK_SIZE_MASK 0x0000ffff
  86. /* CE_INT */
  87. #define INT_CCSDE (1 << 29)
  88. #define INT_CMD12DRE (1 << 26)
  89. #define INT_CMD12RBE (1 << 25)
  90. #define INT_CMD12CRE (1 << 24)
  91. #define INT_DTRANE (1 << 23)
  92. #define INT_BUFRE (1 << 22)
  93. #define INT_BUFWEN (1 << 21)
  94. #define INT_BUFREN (1 << 20)
  95. #define INT_CCSRCV (1 << 19)
  96. #define INT_RBSYE (1 << 17)
  97. #define INT_CRSPE (1 << 16)
  98. #define INT_CMDVIO (1 << 15)
  99. #define INT_BUFVIO (1 << 14)
  100. #define INT_WDATERR (1 << 11)
  101. #define INT_RDATERR (1 << 10)
  102. #define INT_RIDXERR (1 << 9)
  103. #define INT_RSPERR (1 << 8)
  104. #define INT_CCSTO (1 << 5)
  105. #define INT_CRCSTO (1 << 4)
  106. #define INT_WDATTO (1 << 3)
  107. #define INT_RDATTO (1 << 2)
  108. #define INT_RBSYTO (1 << 1)
  109. #define INT_RSPTO (1 << 0)
  110. #define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
  111. INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
  112. INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
  113. INT_RDATTO | INT_RBSYTO | INT_RSPTO)
  114. #define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
  115. INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
  116. INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
  117. #define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE)
  118. /* CE_INT_MASK */
  119. #define MASK_ALL 0x00000000
  120. #define MASK_MCCSDE (1 << 29)
  121. #define MASK_MCMD12DRE (1 << 26)
  122. #define MASK_MCMD12RBE (1 << 25)
  123. #define MASK_MCMD12CRE (1 << 24)
  124. #define MASK_MDTRANE (1 << 23)
  125. #define MASK_MBUFRE (1 << 22)
  126. #define MASK_MBUFWEN (1 << 21)
  127. #define MASK_MBUFREN (1 << 20)
  128. #define MASK_MCCSRCV (1 << 19)
  129. #define MASK_MRBSYE (1 << 17)
  130. #define MASK_MCRSPE (1 << 16)
  131. #define MASK_MCMDVIO (1 << 15)
  132. #define MASK_MBUFVIO (1 << 14)
  133. #define MASK_MWDATERR (1 << 11)
  134. #define MASK_MRDATERR (1 << 10)
  135. #define MASK_MRIDXERR (1 << 9)
  136. #define MASK_MRSPERR (1 << 8)
  137. #define MASK_MCCSTO (1 << 5)
  138. #define MASK_MCRCSTO (1 << 4)
  139. #define MASK_MWDATTO (1 << 3)
  140. #define MASK_MRDATTO (1 << 2)
  141. #define MASK_MRBSYTO (1 << 1)
  142. #define MASK_MRSPTO (1 << 0)
  143. #define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
  144. MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
  145. MASK_MCRCSTO | MASK_MWDATTO | \
  146. MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
  147. #define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
  148. MASK_MBUFREN | MASK_MBUFWEN | \
  149. MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \
  150. MASK_MCMD12RBE | MASK_MCMD12CRE)
  151. /* CE_HOST_STS1 */
  152. #define STS1_CMDSEQ (1 << 31)
  153. /* CE_HOST_STS2 */
  154. #define STS2_CRCSTE (1 << 31)
  155. #define STS2_CRC16E (1 << 30)
  156. #define STS2_AC12CRCE (1 << 29)
  157. #define STS2_RSPCRC7E (1 << 28)
  158. #define STS2_CRCSTEBE (1 << 27)
  159. #define STS2_RDATEBE (1 << 26)
  160. #define STS2_AC12REBE (1 << 25)
  161. #define STS2_RSPEBE (1 << 24)
  162. #define STS2_AC12IDXE (1 << 23)
  163. #define STS2_RSPIDXE (1 << 22)
  164. #define STS2_CCSTO (1 << 15)
  165. #define STS2_RDATTO (1 << 14)
  166. #define STS2_DATBSYTO (1 << 13)
  167. #define STS2_CRCSTTO (1 << 12)
  168. #define STS2_AC12BSYTO (1 << 11)
  169. #define STS2_RSPBSYTO (1 << 10)
  170. #define STS2_AC12RSPTO (1 << 9)
  171. #define STS2_RSPTO (1 << 8)
  172. #define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
  173. STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
  174. #define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
  175. STS2_DATBSYTO | STS2_CRCSTTO | \
  176. STS2_AC12BSYTO | STS2_RSPBSYTO | \
  177. STS2_AC12RSPTO | STS2_RSPTO)
  178. #define CLKDEV_EMMC_DATA 52000000 /* 52 MHz */
  179. #define CLKDEV_MMC_DATA 20000000 /* 20 MHz */
  180. #define CLKDEV_INIT 400000 /* 400 kHz */
/* Driver state machine states; sh_mmcif_host::state, protected by ::lock. */
enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

/* What the bottom half is currently waiting for; sh_mmcif_host::wait_for. */
enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,		/* multi-block PIO read in progress */
	MMCIF_WAIT_FOR_MWRITE,		/* multi-block PIO write in progress */
	MMCIF_WAIT_FOR_READ,		/* single-block PIO read in progress */
	MMCIF_WAIT_FOR_WRITE,		/* single-block PIO write in progress */
	MMCIF_WAIT_FOR_READ_END,	/* waiting for buffer-read-end IRQ */
	MMCIF_WAIT_FOR_WRITE_END,	/* waiting for data-transfer-end IRQ */
	MMCIF_WAIT_FOR_STOP,
};
/*
 * difference for each SoC
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;	/* request currently being processed */
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;			/* latched on error IRQ, consumed by handlers */
	bool dying;
	long timeout;
	void __iomem *addr;		/* MMIO register base */
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;		/* current transfer block size, bytes */
	struct sg_mapping_iter sg_miter; /* PIO scatterlist iterator */
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;			/* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;		/* a DMA transfer has been started */
};
static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);

/* Resolve the struct device backing @host, for dev_*() logging. */
#define sh_mmcif_host_to_dev(host) (&host->pd->dev)
  235. static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
  236. unsigned int reg, u32 val)
  237. {
  238. writel(val | readl(host->addr + reg), host->addr + reg);
  239. }
  240. static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
  241. unsigned int reg, u32 val)
  242. {
  243. writel(~val & readl(host->addr + reg), host->addr + reg);
  244. }
/*
 * dmaengine completion callback for the descriptor submitted by
 * sh_mmcif_start_dma_rx/tx(). Signals host->dma_complete unless the
 * request has unexpectedly vanished (warned and ignored).
 */
static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}
/*
 * Map the request sglist and start a card-to-memory DMA transfer.
 * On any failure (mapping or descriptor prep) the driver falls back to
 * PIO permanently: both DMA channels are released and the DMA enable
 * bits in CE_BUF_ACC are cleared.
 */
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		/* route the data FIFO to the DMA engine, read direction */
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}
/*
 * Map the request sglist and start a memory-to-card DMA transfer.
 * Mirror image of sh_mmcif_start_dma_rx(); on failure both channels are
 * released and the driver falls back to PIO permanently.
 */
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		/* route the data FIFO to the DMA engine, write direction */
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
/*
 * Legacy platform-data path: request a DMA channel by shdma slave ID.
 * Returns NULL (PIO fallback) when no valid ID was supplied.
 * NOTE(review): @slave_id is unsigned (uintptr_t), so "<= 0" can only
 * ever reject zero — the comparison is effectively "== 0".
 */
static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}
/*
 * Point a DMA channel at this controller's CE_DATA FIFO (32-bit wide),
 * as source or destination depending on @direction.
 * Returns 0 on success or a negative errno.
 */
static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}
/*
 * Acquire and configure the Tx and Rx DMA channels, either from legacy
 * platform data (SUPERH) or via dmaengine "tx"/"rx" bindings. DMA is
 * all-or-nothing: if either channel is missing or fails configuration,
 * both are released and the driver stays on PIO.
 */
static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_chan(dev, "tx");
		if (IS_ERR(host->chan_tx))
			host->chan_tx = NULL;
		host->chan_rx = dma_request_chan(dev, "rx");
		if (IS_ERR(host->chan_rx))
			host->chan_rx = NULL;
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}
/*
 * Disable DMA access to the data FIFO and release both channels if held.
 * Channel pointers are cleared before release.
 */
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);

	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;

		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;

		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}
/*
 * Program the MMC bus clock as close as possible to @clk Hz.
 * The clock is first gated and the divider field cleared; @clk == 0
 * leaves it disabled. When the SoC provides a clkdiv_map, the parent
 * rate / power-of-two divider pair with the smallest deviation from
 * @clk is chosen and the parent clock re-rated; otherwise a divider of
 * the current parent rate is computed (or the parent clock is passed
 * through unchanged when sup_pclk permits and rates match).
 */
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */
			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			/* "<=" prefers the smaller divider on a tie */
			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;	/* shift into the CLKDIV field */
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
/*
 * Soft-reset the controller, then restore the CE_CLK_CTRL bits that the
 * reset cleared (0x010f0000 preserves the divider/timeout fields saved
 * beforehand) and re-enable byte swapping on the data FIFO.
 */
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
/*
 * Recover from an error the controller reported (host->sd_error).
 * If HOST_STS1 still shows a command sequence in progress, force-break
 * it (waiting up to ~10 s) and soft-reset the controller. Classifies
 * the failure from HOST_STS2 and returns -ETIMEDOUT for timeout errors,
 * -EIO for everything else.
 */
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}
/*
 * Top half of a single-block PIO read: latch the block size, start the
 * sg mapping iterator and unmask the buffer-read-enable interrupt.
 * The data copy itself happens in sh_mmcif_read_block().
 */
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	/*
	 * +3 makes the word-copy loop (blocksize / 4) round up for block
	 * sizes that are not a multiple of 4 — presumably intentional;
	 * note the multi-block paths below do not add it.
	 */
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
		       SG_MITER_TO_SG);

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
/*
 * Bottom half of a single-block PIO read: copy one block out of the
 * CE_DATA FIFO into the request sglist, then unmask the buffer-read-end
 * interrupt. Returns false on error (or an unexpectedly empty sglist),
 * true when the wait state has advanced to MMCIF_WAIT_FOR_READ_END.
 */
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct sg_mapping_iter *sgm = &host->sg_miter;
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i;

	if (host->sd_error) {
		sg_miter_stop(sgm);
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	if (!sg_miter_next(sgm)) {
		/* This should not happen on single blocks */
		sg_miter_stop(sgm);
		return false;
	}

	p = sgm->addr;

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	sg_miter_stop(&host->sg_miter);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}
/*
 * Top half of a multi-block PIO read: latch the block size, start the
 * sg iterator on the first entry and unmask the buffer-read-enable
 * interrupt. Bails out silently on an empty sglist.
 */
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct sg_mapping_iter *sgm = &host->sg_miter;
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			  BLOCK_SIZE_MASK;

	sg_miter_start(sgm, data->sg, data->sg_len,
		       SG_MITER_TO_SG);

	/* Advance to the first sglist entry */
	if (!sg_miter_next(sgm)) {
		sg_miter_stop(sgm);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_MREAD;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
/*
 * Bottom half of a multi-block PIO read: copy the next block from the
 * CE_DATA FIFO, re-arm the buffer-read-enable interrupt and advance the
 * sg iterator. Returns false on error or once the sglist is exhausted
 * (iterator stopped), true while more blocks remain.
 */
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct sg_mapping_iter *sgm = &host->sg_miter;
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i;

	if (host->sd_error) {
		sg_miter_stop(sgm);
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	p = sgm->addr;

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	sgm->consumed = host->blocksize;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	if (!sg_miter_next(sgm)) {
		sg_miter_stop(sgm);
		return false;
	}

	return true;
}
/*
 * Top half of a single-block PIO write: latch the block size, start the
 * sg iterator and unmask the buffer-write-enable interrupt. The data
 * copy itself happens in sh_mmcif_write_block().
 */
static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	/* +3 rounds the word-copy loop (blocksize / 4) up, as in the read path */
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
		       SG_MITER_FROM_SG);

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
/*
 * Bottom half of a single-block PIO write: push one block from the
 * request sglist into the CE_DATA FIFO, then unmask the
 * data-transfer-end interrupt. Returns false on error (or an
 * unexpectedly empty sglist), true when the wait state has advanced to
 * MMCIF_WAIT_FOR_WRITE_END.
 */
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct sg_mapping_iter *sgm = &host->sg_miter;
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i;

	if (host->sd_error) {
		sg_miter_stop(sgm);
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	if (!sg_miter_next(sgm)) {
		/* This should not happen on single blocks */
		sg_miter_stop(sgm);
		return false;
	}

	p = sgm->addr;

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	sg_miter_stop(&host->sg_miter);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}
/*
 * Top half of a multi-block PIO write: latch the block size, start the
 * sg iterator on the first entry and unmask the buffer-write-enable
 * interrupt. Bails out silently on an empty sglist.
 */
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct sg_mapping_iter *sgm = &host->sg_miter;
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			  BLOCK_SIZE_MASK;

	sg_miter_start(sgm, data->sg, data->sg_len,
		       SG_MITER_FROM_SG);

	/* Advance to the first sglist entry */
	if (!sg_miter_next(sgm)) {
		sg_miter_stop(sgm);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
/*
 * Bottom half of a multi-block PIO write: push the next block into the
 * CE_DATA FIFO and advance the sg iterator. The write-enable interrupt
 * is only re-armed when more data remains. Returns false on error or
 * after the last block, true while more blocks remain.
 */
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct sg_mapping_iter *sgm = &host->sg_miter;
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p;
	int i;

	if (host->sd_error) {
		sg_miter_stop(sgm);
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	p = sgm->addr;

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	sgm->consumed = host->blocksize;

	if (!sg_miter_next(sgm)) {
		sg_miter_stop(sgm);
		return false;
	}

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}
  692. static void sh_mmcif_get_response(struct sh_mmcif_host *host,
  693. struct mmc_command *cmd)
  694. {
  695. if (cmd->flags & MMC_RSP_136) {
  696. cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
  697. cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
  698. cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
  699. cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
  700. } else
  701. cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
  702. }
  703. static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
  704. struct mmc_command *cmd)
  705. {
  706. cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
  707. }
/*
 * Assemble the MMCIF_CE_CMD_SET register value for @mrq: response type,
 * data direction and bus width, DDR timing, multi-block/auto-CMD12 and
 * index/CRC checking bits. The command index goes into bits 31:24 of
 * the returned value.
 */
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R3:
		/* 48-bit (6-byte) response */
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R1B:
		/* 48-bit response with busy signalling on DAT0 */
		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		/* 136-bit (17-byte) response, e.g. CID/CSD */
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}

	/* WDAT / DATW: data transfer present, select bus width */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing, if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN: data direction is write */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN: multi-block transfer with automatic CMD12 */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		/* Block count lives in the upper half of BLOCK_SET */
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	/* Command index in bits 31:24, option flags below */
	return (opc << 24) | tmp;
}
  787. static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
  788. struct mmc_request *mrq, u32 opc)
  789. {
  790. struct device *dev = sh_mmcif_host_to_dev(host);
  791. switch (opc) {
  792. case MMC_READ_MULTIPLE_BLOCK:
  793. sh_mmcif_multi_read(host, mrq);
  794. return 0;
  795. case MMC_WRITE_MULTIPLE_BLOCK:
  796. sh_mmcif_multi_write(host, mrq);
  797. return 0;
  798. case MMC_WRITE_BLOCK:
  799. sh_mmcif_single_write(host, mrq);
  800. return 0;
  801. case MMC_READ_SINGLE_BLOCK:
  802. case MMC_SEND_EXT_CSD:
  803. sh_mmcif_single_read(host, mrq);
  804. return 0;
  805. default:
  806. dev_err(dev, "Unsupported CMD%d\n", opc);
  807. return -EINVAL;
  808. }
  809. }
/*
 * Program the controller for @mrq and fire the command: set up the
 * interrupt mask, block size, command flags and argument, then write
 * CMD_SET to start and arm the timeout work. The CMD_SET write and the
 * state transition are done under host->lock so the IRQ path sees a
 * consistent wait_for state.
 */
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc;
	u32 mask = 0;
	unsigned long flags;

	/* Wait for busy release (R1b) or for the response, respectively */
	if (cmd->flags & MMC_RSP_BUSY)
		mask = MASK_START_CMD | MASK_MRBSYE;
	else
		mask = MASK_START_CMD | MASK_MCRSPE;

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		/* Clear BLOCK_SET, then program the block size (low half) */
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	/*
	 * Acknowledge stale interrupt status; keep INT_CCS set when CCS
	 * handling is disabled. 0xD80430C0 clears the relevant flags —
	 * presumably all handled status bits; TODO confirm against the
	 * MMCIF datasheet.
	 */
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}
  843. static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
  844. struct mmc_request *mrq)
  845. {
  846. struct device *dev = sh_mmcif_host_to_dev(host);
  847. switch (mrq->cmd->opcode) {
  848. case MMC_READ_MULTIPLE_BLOCK:
  849. sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
  850. break;
  851. case MMC_WRITE_MULTIPLE_BLOCK:
  852. sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
  853. break;
  854. default:
  855. dev_err(dev, "unsupported stop cmd\n");
  856. mrq->stop->error = sh_mmcif_error_manage(host);
  857. return;
  858. }
  859. host->wait_for = MMCIF_WAIT_FOR_STOP;
  860. }
  861. static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
  862. {
  863. struct sh_mmcif_host *host = mmc_priv(mmc);
  864. struct device *dev = sh_mmcif_host_to_dev(host);
  865. unsigned long flags;
  866. spin_lock_irqsave(&host->lock, flags);
  867. if (host->state != STATE_IDLE) {
  868. dev_dbg(dev, "%s() rejected, state %u\n",
  869. __func__, host->state);
  870. spin_unlock_irqrestore(&host->lock, flags);
  871. mrq->cmd->error = -EAGAIN;
  872. mmc_request_done(mmc, mrq);
  873. return;
  874. }
  875. host->state = STATE_REQUEST;
  876. spin_unlock_irqrestore(&host->lock, flags);
  877. host->mrq = mrq;
  878. sh_mmcif_start_cmd(host, mrq);
  879. }
/*
 * Derive the host's f_max/f_min from the parent clock. When f_max is
 * preset (from DT/platform data), find the lowest rate the clock can
 * actually provide by repeatedly halving and rounding until
 * clk_round_rate() reaches a fixpoint, then scale both bounds by the
 * divider range. Otherwise fall back to fixed /2 and /512 dividers of
 * the current clock rate.
 */
static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		/* Halve until clk_round_rate() stops changing the rate */
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		/* Smallest divider for f_max, largest for f_min */
		host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
		host->mmc->f_min = f_min >> fls(host->clkdiv_map);
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}
/*
 * .set_ios host operation: handle power sequencing (regulator, clock,
 * runtime PM, DMA channels) and record the requested timing/bus width
 * for the next command. Rejected silently if the host is busy.
 */
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			/* Bring up clock and device before register access */
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			/* Reverse order of the power-up sequence */
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}
	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}
/* Host operations exposed to the MMC core */
static const struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= mmc_gpio_get_cd,
};
/*
 * Complete the command phase of the current request in the IRQ thread:
 * handle command errors, read back the response, then start the data
 * phase — per DMA when a channel is available (and wait for it to
 * finish right here), otherwise by arming the PIO state machine.
 *
 * Returns true when the caller must keep waiting for more interrupts
 * (PIO data phase started), false when the request is ready to be
 * completed (no data, error, or DMA already finished).
 */
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	struct device *dev = sh_mmcif_host_to_dev(host);
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			/* These may legitimately time out during probing */
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from DMA callback and error, so, have to
	 * reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		/* No DMA channel: fall back to interrupt-driven PIO */
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_sync(host->chan_rx);
		else
			dmaengine_terminate_sync(host->chan_tx);
	}

	return false;
}
/*
 * Threaded IRQ handler: drive the per-request state machine one step
 * forward. host->wait_for selects the handler; each handler returns
 * true to keep waiting for further interrupts and false to complete the
 * request. thread_lock serialises against the timeout worker; the
 * pending timeout is cancelled first and re-armed whenever we continue
 * waiting.
 */
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	bool wait = false;
	unsigned long flags;
	int wait_work;

	/* Snapshot the state the hard IRQ was taken in */
	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true, if processing continues, and false, if the
	 * request has to be completed - successfully or not
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		/* Re-arm the timeout and wait for more interrupts */
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		/* An open-ended transfer still needs its stop command */
		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	/* Request finished: reset the state machine and notify the core */
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}
/*
 * Hard IRQ handler: acknowledge the asserted-and-unmasked status bits,
 * mask them, record errors, and decide whether to wake the IRQ thread
 * (PIO / command completion) or complete an in-flight DMA on error.
 */
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	/* Ack the bits we are handling; keep INT_CCS set when CCS is off */
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	/* Mask the sources that just fired */
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	/* Any error status, or a bit outside the known set, is an error */
	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			sh_mmcif_dma_complete(host);
	} else {
		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}
/*
 * Delayed-work handler fired when a request step did not complete
 * within host->timeout: flag the appropriate error on the request and
 * complete it. NOTE(review): host->wait_for and host->mrq are read
 * outside host->lock here; the comment below documents the known race
 * with the IRQ thread's cancel_delayed_work().
 */
static void sh_mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		/* The request completed just before the timeout fired */
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}
  1210. static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
  1211. {
  1212. struct device *dev = sh_mmcif_host_to_dev(host);
  1213. struct sh_mmcif_plat_data *pd = dev->platform_data;
  1214. struct mmc_host *mmc = host->mmc;
  1215. mmc_regulator_get_supply(mmc);
  1216. if (!pd)
  1217. return;
  1218. if (!mmc->ocr_avail)
  1219. mmc->ocr_avail = pd->ocr;
  1220. else if (pd->ocr)
  1221. dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
  1222. }
/*
 * Platform probe: map registers, allocate and configure the mmc_host,
 * set up clock and runtime PM, install the (optionally split) threaded
 * IRQ handlers and register with the MMC core. On success the device
 * is left powered down (runtime-PM put + clock disabled) until
 * .set_ios powers it up. Errors unwind via the goto ladder.
 */
static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct device *dev = &pdev->dev;
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	void __iomem *reg;
	const char *name;

	/* Second IRQ (error/int split) is optional */
	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq_optional(pdev, 1);
	if (irq[0] < 0)
		return irq[0];

	reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = msecs_to_jiffies(10000);
	host->ccs_enable = true;
	host->clk_ctrl2_enable = false;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
	mmc->max_busy_timeout = 10000;

	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(dev, "cannot get clock: %d\n", ret);
		goto err_host;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto err_host;

	sh_mmcif_clk_setup(host);

	pm_runtime_enable(dev);
	host->power = false;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

	/* Quiesce the controller before enabling interrupts */
	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(dev, 100);

	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->clk) / 1000000UL);

	/* Power down until .set_ios(MMC_POWER_UP) */
	pm_runtime_put(dev);
	clk_disable_unprepare(host->clk);
	return ret;

err_clk:
	clk_disable_unprepare(host->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}
/*
 * Platform remove: mark the host dying (stops the timeout worker),
 * power the device back up for the final register access, unregister
 * from the MMC core and release all resources.
 */
static void sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->clk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->clk);

	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: briefly power the device up to mask all controller
 * interrupts so none fire across the sleep transition.
 */
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
	pm_runtime_put(dev);

	return 0;
}

/*
 * System resume: nothing to restore here; the controller is fully
 * re-initialised on the next power-up via .set_ios.
 */
static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif
/* System sleep PM ops (no-ops unless CONFIG_PM_SLEEP is set) */
static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};
/* Platform driver glue */
static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove_new	= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &sh_mmcif_dev_pm_ops,
		.of_match_table = sh_mmcif_of_match,
	},
};
module_platform_driver(sh_mmcif_driver);

/* Module metadata */
MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");