/* sdmmc.c */
#include "FreeRTOS.h"
#include "chip.h"
#include "board.h"
#include "mmc.h"
#include "sdio.h"
#include "sdmmc.h"
#include "mmcsd_core.h"
#include "os_adapt.h"
#include "sema.h"

#ifdef SDMMC_SUPPORT
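/*
 * Internal-DMA (IDMAC) descriptor, 32-bit chained mode.  Each descriptor
 * covers at most DW_MCI_DESC_DATA_LENGTH (4 KiB) of a transfer; des3 links
 * to the next descriptor and the OWN bit in des0 hands it to the IDMAC.
 */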
#define DW_MCI_DESC_DATA_LENGTH 0x1000

struct idmac_desc {
	__le32 des0; /* Control descriptor */
#define IDMAC_DES0_DIC  BIT(1)
#define IDMAC_DES0_LD   BIT(2)
#define IDMAC_DES0_FD   BIT(3)
#define IDMAC_DES0_CH   BIT(4)
#define IDMAC_DES0_ER   BIT(5)
#define IDMAC_DES0_CES  BIT(30)
#define IDMAC_DES0_OWN  BIT(31)

	__le32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32 des2; /* Buffer 1 physical address */
	__le32 des3; /* Buffer 2 physical address (next descriptor in chained mode) */
};
static inline uint32_t MMC_GetWaterlevel(struct ark_mmc_obj *mmc_obj)
{
	/* STATUS[29:17]: FIFO fill level in 32-bit words */
	return (readl(mmc_obj->base + SDMMC_STATUS) >> 17) & 0x1fff;
}

static inline uint32_t MMC_GetStatus(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_STATUS);
}

static inline uint32_t MMC_GetRawInterrupt(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_RINTSTS);
}

static inline uint32_t MMC_GetUnmaskedInterrupt(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_MINTSTS);
}

static inline uint32_t MMC_ClearRawInterrupt(struct ark_mmc_obj *mmc_obj, uint32_t interrupts)
{
	return writel(interrupts, mmc_obj->base + SDMMC_RINTSTS);
}

static inline uint32_t MMC_GetInterruptMask(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_INTMASK);
}

static inline uint32_t MMC_SetInterruptMask(struct ark_mmc_obj *mmc_obj, uint32_t mask)
{
	return writel(mask, mmc_obj->base + SDMMC_INTMASK);
}

static inline void MMC_SetByteCount(struct ark_mmc_obj *mmc_obj, uint32_t bytes)
{
	writel(bytes, mmc_obj->base + SDMMC_BYTCNT);
}

static inline void MMC_SetBlockSize(struct ark_mmc_obj *mmc_obj, uint32_t size)
{
	writel(size, mmc_obj->base + SDMMC_BLKSIZ);
}

static inline uint32_t MMC_GetResponse(struct ark_mmc_obj *mmc_obj, int resp_num)
{
	return readl(mmc_obj->base + SDMMC_RESP0 + resp_num * 4);
}

static inline uint32_t MMC_IsFifoEmpty(struct ark_mmc_obj *mmc_obj)
{
	/* STATUS[2]: FIFO empty */
	return (readl(mmc_obj->base + SDMMC_STATUS) >> 2) & 0x1;
}

/*
static inline uint32_t MMC_IsDataStateBusy(struct ark_mmc_obj *mmc_obj)
{
	return (readl(mmc_obj->base + SDMMC_STATUS) >> 10) & 0x1;
}
*/
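/*
 * Update the card clock.  The CIU only latches a new divider after an
 * "update clock" command (CMD with START | UPD_CLK and no response), so
 * the sequence is: disable clock -> program divider -> re-enable clock,
 * issuing that command after each step and polling CMD.START until the
 * CIU has accepted it.
 */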
int MMC_UpdateClockRegister(struct ark_mmc_obj *mmc_obj, int div)
{
	uint32_t tick, timeout;
	int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);

	tick = xTaskGetTickCount();
	timeout = tick + configTICK_RATE_HZ / 10; /* 100ms for the whole sequence */

	/* disable clock */
	writel(0, mmc_obj->base + SDMMC_CLKENA);
	writel(0, mmc_obj->base + SDMMC_CLKSRC);

	/* inform CIU: START | UPD_CLK | WAIT_PRVDATA_COMPLETE */
	writel(0, mmc_obj->base + SDMMC_CMDARG);
	wmb(); /* drain writebuffer */
	writel(1 << 31 | 1 << 21 | 1 << 13, mmc_obj->base + SDMMC_CMD);
	while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
			printf("ERROR: %s, card out\n", __func__);
			return -1;
		}
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			printf("ERROR: %s, update clock timeout\n", __func__);
			return -1;
		}
	}

	/* set clock to desired speed */
	writel(div, mmc_obj->base + SDMMC_CLKDIV);

	/* inform CIU */
	writel(0, mmc_obj->base + SDMMC_CMDARG);
	wmb(); /* drain writebuffer */
	writel(1 << 31 | 1 << 21 | 1 << 13, mmc_obj->base + SDMMC_CMD);
	while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
			printf("ERROR: %s, card out\n", __func__);
			return -1;
		}
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			printf("ERROR: %s, update clock timeout\n", __func__);
			return -1;
		}
	}

	/* enable clock */
	writel(1, mmc_obj->base + SDMMC_CLKENA);

	/* inform CIU: START | UPD_CLK */
	writel(1 << 31 | 1 << 21, mmc_obj->base + SDMMC_CMD);
	while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
			printf("ERROR: %s, card out\n", __func__);
			return -1;
		}
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			printf("ERROR: %s, update clock timeout\n", __func__);
			return -1;
		}
	}
	return 0;
}
int MMC_SetCardWidth(struct ark_mmc_obj *mmc_obj, int width)
{
	switch (width) {
	case SDMMC_CTYPE_1BIT:
		writel(0, mmc_obj->base + SDMMC_CTYPE);
		break;
	case SDMMC_CTYPE_4BIT:
		writel(1, mmc_obj->base + SDMMC_CTYPE);
		break;
	default:
		printf("ERROR: %s, card width %d is not supported\n", __func__, width);
		return -1;
	}
	return 0;
}
int MMC_SendCommand(struct ark_mmc_obj *mmc_obj, uint32_t cmd, uint32_t arg, uint32_t flags)
{
	uint32_t tick, timeout;

	tick = xTaskGetTickCount();
	timeout = tick + configTICK_RATE_HZ; /* 1s */

	writel(arg, mmc_obj->base + SDMMC_CMDARG);
	flags |= 1 << 31 | 1 << 29 | cmd; /* START | USE_HOLD_REG | command index */
	writel(flags, mmc_obj->base + SDMMC_CMD);
	while (readl(mmc_obj->base + SDMMC_CMD) & SDMMC_CMD_START) {
		if ((mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) &&
		    (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
			printf("ERROR: %s, card removed while waiting for CMD.START to clear\n", __func__);
			return -1;
		}
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			printf("ERROR: %s, send cmd timeout\n", __func__);
			return -1;
		}
	}
	//fixme: check HLE_INT_STATUS
	return 0;
}
int MMC_ResetFifo(struct ark_mmc_obj *mmc_obj)
{
	uint32_t reg, tick, timeout;

	tick = xTaskGetTickCount();
	timeout = tick + configTICK_RATE_HZ / 10; /* 100ms */

	reg = readl(mmc_obj->base + SDMMC_CTRL);
	reg |= SDMMC_CTRL_FIFO_RESET;
	writel(reg, mmc_obj->base + SDMMC_CTRL);
	/* wait until the FIFO reset bit self-clears */
	while (readl(mmc_obj->base + SDMMC_CTRL) & SDMMC_CTRL_FIFO_RESET) {
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			printf("ERROR: %s, FIFO reset timeout\n", __func__);
			return -1;
		}
	}
	return 0;
}
int MMC_Reset(struct ark_mmc_obj *mmc_obj)
{
	uint32_t reg, tick, timeout;

	reg = readl(mmc_obj->base + SDMMC_CTRL);
	reg |= SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET;
	writel(reg, mmc_obj->base + SDMMC_CTRL);

	tick = xTaskGetTickCount();
	timeout = tick + configTICK_RATE_HZ / 10; /* 100ms */
	while (readl(mmc_obj->base + SDMMC_CTRL) &
	       (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET)) {
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			printf("ERROR: %s, CTRL dma|fifo|ctrl reset timeout\n", __func__);
			return -1;
		}
	}
	return 0;
}
#define DW_MCI_DMA_THRESHOLD 32

/* DMA interface functions */
static void dw_mci_stop_dma(struct ark_mmc_obj *mmc_obj)
{
	if (mmc_obj->using_dma) {
		mmc_obj->dma_ops->stop(mmc_obj);
		mmc_obj->dma_ops->cleanup(mmc_obj);
	}
	/* Data transfer was stopped by the interrupt handler */
	//set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj);

static void dw_mci_dmac_complete_callback(void *param, unsigned int mask)
{
	struct ark_mmc_obj *mmc_obj = param;
	//struct mmcsd_data *data = mmc_obj->data;

	dev_vdbg(mmc_obj->dev, "DMA complete\n");
	mmc_obj->dma_ops->cleanup(mmc_obj);
	xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	/* if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	} */
}
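/*
 * Busy-wait helpers: poll CTRL bit 2 (FIFO/DMA reset) or descriptor des0
 * bit 31 (IDMAC OWN) until the bit clears, sleeping __delay_us between
 * reads.  Return 0 on success, 1 on timeout.
 */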
static UINT32 WaiteforDmaResetClear(UINT32 addr, UINT32 __delay_us, UINT32 timeout_us)
{
	u64 __timeout_us = timeout_us;
	UINT32 tmp, val;

	while (__timeout_us) {
		val = readl(addr);
		tmp = (val >> 2) & 0x1;
		if (tmp == 0)
			return 0;
		udelay(__delay_us);
		__timeout_us--; /* was missing: without it this loop never timed out */
	}
	return 1;
}

static UINT32 WaiteforDmaOwnClear(u32 addr, UINT32 __delay_us, UINT32 timeout_us)
{
	u64 __timeout_us = timeout_us;
	UINT32 tmp, val;

	while (__timeout_us) {
		val = readl(addr);
		tmp = (val >> 31) & 0x1;
		if (tmp == 0)
			return 0;
		udelay(__delay_us);
		__timeout_us--;
	}
	return 1;
}
static bool dw_mci_ctrl_reset(struct ark_mmc_obj *mmc_obj, u32 reset)
{
	u32 ctrl;

	ctrl = readl(mmc_obj->base + SDMMC_CTRL);
	ctrl |= reset;
	writel(ctrl, mmc_obj->base + SDMMC_CTRL);

	/* wait till resets clear */
	if (WaiteforDmaResetClear(mmc_obj->base + SDMMC_CTRL, 1, 500 * USEC_PER_MSEC)) {
		printf("Timeout resetting block (ctrl reset %#x)\n",
		       readl(mmc_obj->base + SDMMC_CTRL));
		return false;
	}
	return true;
}

static void dw_mci_idmac_reset(struct ark_mmc_obj *mmc_obj)
{
	u32 bmod = readl(mmc_obj->base + SDMMC_BMOD);

	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	writel(bmod, mmc_obj->base + SDMMC_BMOD);
}
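/*
 * Build the descriptor ring: forward-link every descriptor through des3,
 * mark the last one end-of-ring (ER) pointing back to the head, then reset
 * the IDMAC, unmask only the NI/RI/TI interrupts, and program the ring base
 * address into DBADDR (or DBADDRL/DBADDRU in 64-bit mode).
 */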
static int dw_mci_idmac_init(struct ark_mmc_obj *mmc_obj)
{
	int i;
	unsigned int ring_size = 0;

	if (mmc_obj->dma_64bit_address == 1) {
#if 0
		struct idmac_desc_64addr *p;

		/* Number of descriptors in the ring buffer */
		ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = mmc_obj->sg_cpu; i < ring_size - 1; i++, p++) {
			p->des6 = (mmc_obj->sg_dma +
				   (sizeof(struct idmac_desc_64addr) * (i + 1))) & 0xffffffff;
			p->des7 = (u64)(mmc_obj->sg_dma +
					(sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = mmc_obj->sg_dma & 0xffffffff;
		p->des7 = (u64)mmc_obj->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;
#endif
	} else {
		struct idmac_desc *p;

		/* Number of descriptors in the ring buffer */
		ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = mmc_obj->sg_cpu; i < ring_size - 1; i++, p++) {
			p->des3 = cpu_to_le32(mmc_obj->sg_dma +
					      (sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(mmc_obj->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(mmc_obj);

	if (mmc_obj->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS64);
		writel(SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI,
		       mmc_obj->base + SDMMC_IDINTEN64);
		/* Set the descriptor base address */
		writel(mmc_obj->sg_dma & 0xffffffff, mmc_obj->base + SDMMC_DBADDRL);
		writel((u64)mmc_obj->sg_dma >> 32, mmc_obj->base + SDMMC_DBADDRU);
	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS);
		writel(SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI,
		       mmc_obj->base + SDMMC_IDINTEN);
		/* Set the descriptor base address */
		writel(mmc_obj->sg_dma, mmc_obj->base + SDMMC_DBADDR);
	}

	return 0;
}
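/*
 * Translate one request into a chain of 32-bit descriptors.  The transfer
 * is cut into DW_MCI_DESC_DATA_LENGTH (4 KiB) pieces; if the caller's buffer
 * is not ARCH_DMA_MINALIGN-aligned, a bounce ("dummy") buffer is allocated
 * and used instead.  Each descriptor's OWN bit must already be clear before
 * the CPU rewrites it, since the IDMAC clears OWN asynchronously.
 */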
static inline int dw_mci_prepare_desc32(struct ark_mmc_obj *mmc_obj,
					struct mmcsd_data *data)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 dma_address;
	int i;
	unsigned int sg_len;
	u32 mem_addr;
	unsigned int length;
	unsigned int leftsize;

	dma_address = (u32)data->buf;
	if (dma_address & (ARCH_DMA_MINALIGN - 1)) {
		/* unaligned buffer: bounce through an aligned dummy buffer */
		if (data->flags & DATA_DIR_WRITE) {
			mmc_obj->tx_dummy_buffer = pvPortMalloc(data->blks * data->blksize);
			if (!mmc_obj->tx_dummy_buffer)
				return -ENOMEM;
			memcpy(mmc_obj->tx_dummy_buffer, data->buf, data->blks * data->blksize);
			dma_address = (u32)mmc_obj->tx_dummy_buffer;
		} else if (data->flags & DATA_DIR_READ) {
			mmc_obj->rx_dummy_buffer = pvPortMalloc(data->blks * data->blksize);
			if (!mmc_obj->rx_dummy_buffer)
				return -ENOMEM;
			dma_address = (u32)mmc_obj->rx_dummy_buffer;
		}
		mmc_obj->dummy_buffer_used = 1;
	} else {
		mmc_obj->dummy_buffer_used = 0;
	}

	desc_first = desc_last = desc = mmc_obj->sg_cpu;
	leftsize = data->blks * data->blksize;

	if (data->blks * data->blksize < DW_MCI_DESC_DATA_LENGTH) {
		sg_len = 1;
		length = data->blks * data->blksize;
	} else {
		/* number of 4 KiB segments, rounded up */
		sg_len = (data->blks * data->blksize % DW_MCI_DESC_DATA_LENGTH) ?
			 ((data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH) + 1) :
			 data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH;
		length = DW_MCI_DESC_DATA_LENGTH;
	}

	mem_addr = VIRT_TO_PHY(dma_address);

	for (i = 0; i < sg_len; i++) {
		if (sg_len > 1) {
			length = (leftsize >= DW_MCI_DESC_DATA_LENGTH) ?
				 DW_MCI_DESC_DATA_LENGTH : leftsize;
			if (length == 0)
				break;
		}
		for ( ; length; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;
			length -= desc_len;
			leftsize -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (WaiteforDmaOwnClear((UINT32)(&desc->des0), 10, 10)) {
				printf("ERROR: %s, IDMAC OWN bit timeout\n", __func__);
				goto err_own_bit;
			}

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);
			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);
			/* Update physical address for the next desc */
			mem_addr += desc_len;
			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
	/* Set last descriptor: end the chain, enable its completion interrupt */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	mmc_obj->data = data;
	return 0;

err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg("descriptor is still owned by IDMAC.\n");
	memset(mmc_obj->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(mmc_obj);
	return -EINVAL;
}
static int dw_mci_idmac_start_dma(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
	u32 temp;
	int ret = 0;

	if (mmc_obj->dma_64bit_address == 1)
		;/* ret = dw_mci_prepare_desc64(mmc_obj, data); 64-bit mode unsupported */
	else
		ret = dw_mci_prepare_desc32(mmc_obj, data);
	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(mmc_obj, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(mmc_obj);

	/* Select IDMAC interface */
	temp = readl(mmc_obj->base + SDMMC_CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	writel(temp, mmc_obj->base + SDMMC_CTRL);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = readl(mmc_obj->base + SDMMC_BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	writel(temp, mmc_obj->base + SDMMC_BMOD);

	/* Cache maintenance on whichever buffer the IDMAC will actually use */
	if (mmc_obj->using_dma) {
		if (mmc_obj->dummy_buffer_used) {
			if (data->flags & DATA_DIR_WRITE) {
				if (mmc_obj->tx_dummy_buffer)
					CP15_clean_dcache_for_dma((u32)mmc_obj->tx_dummy_buffer,
						(u32)mmc_obj->tx_dummy_buffer + data->blks * data->blksize);
			} else if (data->flags & DATA_DIR_READ) {
				if (mmc_obj->rx_dummy_buffer)
					CP15_flush_dcache_for_dma((u32)mmc_obj->rx_dummy_buffer,
						(u32)mmc_obj->rx_dummy_buffer + data->blks * data->blksize);
			}
		} else {
			/* Clean cache before write */
			if (data->flags & DATA_DIR_WRITE)
				CP15_clean_dcache_for_dma((u32)data->buf,
					(u32)data->buf + data->blks * data->blksize);
			/* Invalidate cache before read */
			else if (data->flags & DATA_DIR_READ)
				CP15_flush_dcache_for_dma((u32)data->buf,
					(u32)data->buf + data->blks * data->blksize);
		}
	}

	/* Start it running */
	writel(1, mmc_obj->base + SDMMC_PLDMND);

out:
	return ret;
}
static void dw_mci_idmac_stop_dma(struct ark_mmc_obj *mmc_obj)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = readl(mmc_obj->base + SDMMC_CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	writel(temp, mmc_obj->base + SDMMC_CTRL);

	/* Stop the IDMAC running */
	temp = readl(mmc_obj->base + SDMMC_BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	writel(temp, mmc_obj->base + SDMMC_BMOD);
}
void dw_mci_dmac_complete_dma(void *arg)
{
#if 0
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
#endif
	return;
}
static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj)
{
	/* The descriptor ring is allocated once and reused across requests,
	 * so there is currently nothing to release per transfer. */
	return;
#if 0
	if (mmc_obj->use_dma == TRANS_MODE_IDMAC) {
		void *data = mmc_obj->sg_cpu;

		if (data)
			free(data);
	}
#endif
}
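/* Internal-DMA operation table, bound to mmc_obj->dma_ops in dw_mci_init_dma() */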
static struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	// .complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
static void dw_mci_init_dma(struct ark_mmc_obj *mmc_obj)
{
	unsigned int addr_config = 0;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	mmc_obj->use_dma = SDMMC_GET_TRANS_MODE(readl(mmc_obj->base + SDMMC_HCON));
	if (mmc_obj->use_dma == DMA_INTERFACE_IDMA)
		mmc_obj->use_dma = TRANS_MODE_IDMAC;
	else
		goto no_dma;

	/*
	 * Check the ADDR_CONFIG bit in HCON to find the IDMAC address bus
	 * width.  Note: 64-bit mode is never enabled here, dma_64bit_address
	 * stays 0 either way.
	 */
	addr_config = SDMMC_GET_ADDR_CONFIG(readl(mmc_obj->base + SDMMC_HCON));
	if (addr_config == 1) {
		;/* host supports IDMAC in 64-bit address mode */
	} else {
		/* host supports IDMAC in 32-bit address mode */
		mmc_obj->dma_64bit_address = 0;
		dev_info(mmc_obj->dev, "IDMAC supports 32-bit address mode.\n");
	}

	/* Alloc memory for the descriptor ring */
	mmc_obj->sg_cpu = (void *)(dma_addr_t)pvPortMalloc(DESC_RING_BUF_SZ + ARCH_DMA_MINALIGN);
	if (!mmc_obj->sg_cpu) {
		printf("%s: could not alloc DMA memory\n", __func__);
		goto no_dma;
	}
	/* keep the physical address for the hardware, access the ring uncached */
	mmc_obj->sg_dma = VIRT_TO_PHY((u32)mmc_obj->sg_cpu);
	mmc_obj->sg_cpu = (u32 *)PHY_TO_UNCACHED_VIRT((u32)mmc_obj->sg_cpu);
	printf("sg_dma 0x%x, sg_cpu 0x%x\n", (u32)mmc_obj->sg_dma, (u32)mmc_obj->sg_cpu);
	if (!mmc_obj->sg_cpu) {
		dev_err(mmc_obj->dev, "%s: could not alloc DMA memory\n", __func__);
		goto no_dma;
	}

	mmc_obj->dma_ops = &dw_mci_idmac_ops;
	dev_info(mmc_obj->dev, "Using internal DMA controller.\n");

	if (mmc_obj->dma_ops->init && mmc_obj->dma_ops->start &&
	    mmc_obj->dma_ops->stop && mmc_obj->dma_ops->cleanup) {
		if (mmc_obj->dma_ops->init(mmc_obj)) {
			printf("%s: Unable to initialize DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		printf("DMA initialization not found.\n");
		goto no_dma;
	}
	return;

no_dma:
	dev_info(mmc_obj->dev, "Using PIO mode.\n");
	mmc_obj->use_dma = TRANS_MODE_PIO;
}
void MMC_Init(struct ark_mmc_obj *mmc_obj)
{
	uint32_t reg;

	if (mmc_obj->mmc_reset)
		mmc_obj->mmc_reset(mmc_obj);
	vClkEnable(mmc_obj->clk_id);

	MMC_Reset(mmc_obj);
	dw_mci_init_dma(mmc_obj);

	MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_ALL);
	MMC_SetInterruptMask(mmc_obj, 0x0);

	reg = readl(mmc_obj->base + SDMMC_CTRL);
	reg |= SDMMC_CTRL_INT_ENABLE;
	writel(reg, mmc_obj->base + SDMMC_CTRL);

	/* set data/response timeout to the maximum */
	writel(0xffffffff, mmc_obj->base + SDMMC_TMOUT);

	/* FIFO depth is FIFOTH[27:16] + 1; default watermarks at half depth */
	reg = readl(mmc_obj->base + SDMMC_FIFOTH);
	reg = ((reg >> 16) & 0xfff) + 1;
	mmc_obj->fifoth_val = SDMMC_SET_FIFOTH(0x3, reg / 2 - 1, reg / 2);
	writel(mmc_obj->fifoth_val, mmc_obj->base + SDMMC_FIFOTH);

	MMC_SetInterruptMask(mmc_obj, SDMMC_INT_CD);
}
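/*
 * PIO fast paths: driven from the TXDR/RXDR watermark interrupts when a
 * transfer could not be handed to the IDMAC.  Both loops move whole 32-bit
 * words, using the FIFO water level to size each burst.
 */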
static int ark_mmc_write_pio(struct mmc_driver *mmc_drv)
{
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	struct mmcsd_cmd *cmd = mmc_drv->cmd;
	struct mmcsd_data *data = NULL;
	uint32_t status = 0;
	uint32_t len;
	uint32_t remain, fcnt;
	uint32_t *buf;
	int i;
	int hotpluge_support;

	if (cmd)
		data = cmd->data;
	if (!data) {
		TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__);
		return -EIO;
	}

	hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
	do {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
			break;
		if (data->blks * data->blksize == data->bytes_xfered)
			break;
		buf = data->buf + data->bytes_xfered / 4;
		remain = data->blks * data->blksize - data->bytes_xfered;
		do {
			/* free FIFO space in bytes */
			fcnt = (SDMMC_FIFO_DEPTH - MMC_GetWaterlevel(mmc_obj)) * 4;
			len = configMIN(remain, fcnt);
			if (!len)
				break;
			for (i = 0; i < len / 4; i++)
				writel(*buf++, mmc_obj->base + SDMMC_FIFO);
			data->bytes_xfered += len;
			remain -= len;
		} while (remain);
		status = readl(mmc_obj->base + SDMMC_MINTSTS);
		/* clear TXDR via the raw status register (MINTSTS is read-only) */
		writel(SDMMC_INT_TXDR, mmc_obj->base + SDMMC_RINTSTS);
	} while (status & SDMMC_INT_TXDR); /* if TXDR fired again, keep writing */

	return 0;
}
static int ark_mmc_read_pio(struct mmc_driver *mmc_drv, bool dto)
{
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	struct mmcsd_cmd *cmd = mmc_drv->cmd;
	struct mmcsd_data *data = NULL;
	u32 status = 0;
	unsigned int len;
	unsigned int remain, fcnt;
	uint32_t *buf;
	int i;
	int hotpluge_support;

	if (cmd)
		data = cmd->data;
	if (!data) {
		TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__);
		return -EIO;
	}

	hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
	do {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
			break;
		if (data->blks * data->blksize == data->bytes_xfered)
			break;
		buf = data->buf + data->bytes_xfered / 4;
		remain = data->blks * data->blksize - data->bytes_xfered;
		do {
			/* available FIFO data in bytes */
			fcnt = MMC_GetWaterlevel(mmc_obj) * 4;
			len = configMIN(remain, fcnt);
			if (!len)
				break;
			for (i = 0; i < len / 4; i++)
				*buf++ = readl(mmc_obj->base + SDMMC_FIFO);
			data->bytes_xfered += len;
			remain -= len;
		} while (remain);
		status = readl(mmc_obj->base + SDMMC_MINTSTS);
		writel(SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS);
		/* if RXDR is set again, or DTO left residue in the FIFO, read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && MMC_GetWaterlevel(mmc_obj)));

	return 0;
}
static void ark_mmc_set_iocfg(struct mmcsd_host *host, struct mmcsd_io_cfg *io_cfg)
{
	uint32_t clksrc, clkdiv;
	struct mmc_driver *mmc_drv = host->private_data;
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	unsigned int regs;
	int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);

	/* maybe switch power to the card */
	switch (io_cfg->power_mode) {
	case MMCSD_POWER_OFF:
		if (hotpluge_support == 1) {
			regs = readl(mmc_obj->base + SDMMC_PWREN);
			regs &= ~(1 << 0);
			writel(regs, mmc_obj->base + SDMMC_PWREN);
		}
		break;
	case MMCSD_POWER_UP:
		if (hotpluge_support == 1) {
			regs = readl(mmc_obj->base + SDMMC_PWREN);
			regs |= (1 << 0);
			writel(regs, mmc_obj->base + SDMMC_PWREN);
		}
		break;
	case MMCSD_POWER_ON:
		if (hotpluge_support == 1)
			MMC_Reset(mmc_obj);
		break;
	default:
		printf("ERROR: %s, unknown power_mode %d\n", __func__, io_cfg->power_mode);
		break;
	}

	//fixme: read from PMU
	//why io_cfg->clock == 0 ?
	if (io_cfg->clock) {
		clksrc = ulClkGetRate(mmc_obj->clk_id);
		clkdiv = clksrc / io_cfg->clock / 2;
		MMC_UpdateClockRegister(mmc_obj, clkdiv);
		TRACE_DEBUG("io_cfg->clock: %lu, clock in: %lu, clkdiv: %d\n",
			    io_cfg->clock, clksrc, clkdiv);
	} else {
		writel(0, mmc_obj->base + SDMMC_CLKENA);
	}

	if (io_cfg->bus_width == MMCSD_BUS_WIDTH_4) {
		MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_4BIT);
		TRACE_DEBUG("set to 4-bit mode\n");
	} else {
		MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_1BIT);
		// printf("set to 1-bit mode\n");
	}
	TRACE_DEBUG("%s end\n", __func__);
}
static void ark_mmc_enable_sdio_irq(struct mmcsd_host *host, int32_t enable)
{
	struct mmc_driver *mmc_drv = host->private_data;
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	uint32_t reg;

	TRACE_DEBUG("%s start\n", __func__);
	if (enable) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO);
		reg = MMC_GetInterruptMask(mmc_obj);
		reg |= SDMMC_INT_SDIO;
		MMC_SetInterruptMask(mmc_obj, reg);
	} else {
		reg = MMC_GetInterruptMask(mmc_obj);
		reg &= ~SDMMC_INT_SDIO;
		MMC_SetInterruptMask(mmc_obj, reg);
	}
}

static int32_t ark_mmc_get_card_status(struct mmcsd_host *host)
{
	struct mmc_driver *mmc_drv = host->private_data;
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

	/* CDETECT bit 0 is low when a card is present */
	return !(readl(mmc_obj->base + SDMMC_CDETECT) & 0x1);
}
static void ark_mmc_send_command(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd)
{
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	struct mmcsd_req *req = mmc_drv->req;
	struct mmcsd_data *data;
	int ret;
	uint32_t cmd_flags = 0;

	TRACE_DEBUG("%s, start\n", __func__);
	if (!cmd) {
		//fixme: stop dma
		printf("ERROR: %s, cmd is NULL\n", __func__);
		return;
	}
	/* dereference cmd only after the NULL check (was read before it) */
	//fixme: cmd->data or req->data
	data = cmd->data;

	if (data) {
		cmd_flags |= SDMMC_CMD_DAT_EXP;
		/* always set data start - also set direction flag for read */
		if (data->flags & DATA_DIR_WRITE)
			cmd_flags |= SDMMC_CMD_DAT_WR;
		if (data->flags & DATA_STREAM)
			cmd_flags |= SDMMC_CMD_STRM_MODE;
	}

	if (cmd == req->stop)
		cmd_flags |= SDMMC_CMD_STOP;
	else
		cmd_flags |= SDMMC_CMD_PRV_DAT_WAIT;

	switch (resp_type(cmd)) {
	case RESP_NONE:
		break;
	case RESP_R1:
	case RESP_R5:
	case RESP_R6:
	case RESP_R7:
	case RESP_R1B:
		cmd_flags |= SDMMC_CMD_RESP_EXP;
		cmd_flags |= SDMMC_CMD_RESP_CRC;
		break;
	case RESP_R2:
		cmd_flags |= SDMMC_CMD_RESP_EXP;
		cmd_flags |= SDMMC_CMD_RESP_CRC;
		cmd_flags |= SDMMC_CMD_RESP_LONG;
		break;
	case RESP_R3:
	case RESP_R4:
		cmd_flags |= SDMMC_CMD_RESP_EXP;
		break;
	default:
		printf("ERROR: %s, unknown cmd type %x\n", __func__, resp_type(cmd));
		return;
	}

	if (cmd->cmd_code == GO_IDLE_STATE)
		cmd_flags |= SDMMC_CMD_INIT;

	/* CMD11: signal voltage switch */
	if (cmd->cmd_code == READ_DAT_UNTIL_STOP)
		cmd_flags |= SDMMC_CMD_VOLT_SWITCH;

	TRACE_DEBUG("cmd code: %d, args: 0x%x, resp type: 0x%x, flag: 0x%x\n",
		    cmd->cmd_code, cmd->arg, resp_type(cmd), cmd_flags);
	ret = MMC_SendCommand(mmc_obj, cmd->cmd_code, cmd->arg, cmd_flags);
	if (ret)
		printf("ERROR: %s, send command timeout, cmd: %d, status: 0x%x\n",
		       __func__, cmd->cmd_code, MMC_GetStatus(mmc_obj));
}
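/*
 * Pick the largest DMA burst size (MSIZE) that evenly divides both the
 * block depth and the TX watermark complement, then derive the RX/TX
 * watermarks from it.  Falls back to MSIZE = 1 when blksz is not a
 * multiple of the FIFO width.
 */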
static void dw_mci_adjust_fifoth(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
	unsigned int blksz = data->blksize;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 4;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!mmc_obj->use_dma)
		return;

	tx_wmark = SDMMC_FIFO_DEPTH / 2;
	tx_wmark_invers = SDMMC_FIFO_DEPTH - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	writel(fifoth_val, mmc_obj->base + SDMMC_FIFOTH);
}
static int dw_mci_submit_data_dma(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
	u32 temp;

	mmc_obj->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!mmc_obj->use_dma)
		return -1;

	/* small, unaligned or non-word-multiple transfers go through PIO */
	if (data->blks * data->blksize < DW_MCI_DMA_THRESHOLD ||
	    data->blksize & 3 || (u32)data->buf & 3) {
		mmc_obj->dma_ops->stop(mmc_obj);
		return -1;
	}

	mmc_obj->using_dma = 1;

	temp = MMC_GetInterruptMask(mmc_obj);
	temp |= SDMMC_INT_DATA_OVER | SDMMC_INT_DATA_ERROR;
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	MMC_SetInterruptMask(mmc_obj, temp);

	/* Enable the DMA interface */
	temp = readl(mmc_obj->base + SDMMC_CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	writel(temp, mmc_obj->base + SDMMC_CTRL);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (mmc_obj->prev_blksz != data->blksize)
		dw_mci_adjust_fifoth(mmc_obj, data);

	if (mmc_obj->dma_ops->start(mmc_obj, data)) {
		mmc_obj->dma_ops->stop(mmc_obj);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(mmc_obj->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		mmc_obj->using_dma = 0;
		return -1;
	}
	return 0;
}
static void ark_mmc_prepare_data(struct mmc_driver *mmc_drv)
{
	struct mmcsd_cmd *cmd = mmc_drv->cmd;
	struct mmcsd_data *data = cmd->data;
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	uint32_t data_size;
	uint32_t reg;

	if (!data) {
		MMC_SetBlockSize(mmc_obj, 0);
		MMC_SetByteCount(mmc_obj, 0);
		return;
	}

	TRACE_DEBUG("%s, start\n", __func__);
	if (MMC_ResetFifo(mmc_obj))
		return;

	data_size = data->blks * data->blksize;
	MMC_SetBlockSize(mmc_obj, data->blksize);
	data->bytes_xfered = 0;
	if (data_size % 4)
		printf("ERROR: data_size should be a multiple of 4, but is %d\n", data_size);
	MMC_SetByteCount(mmc_obj, data_size);
	TRACE_DEBUG("%s, set blk size: 0x%x, byte count: 0x%x\n", __func__, data->blksize, data_size);

	if (dw_mci_submit_data_dma(mmc_obj, data)) {
		/* DMA refused the transfer: fall back to PIO with data interrupts */
		writel(SDMMC_INT_TXDR | SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS);
		reg = readl(mmc_obj->base + SDMMC_INTMASK);
		reg |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		writel(reg, mmc_obj->base + SDMMC_INTMASK);
		reg = readl(mmc_obj->base + SDMMC_CTRL);
		reg &= ~SDMMC_CTRL_DMA_ENABLE;
		writel(reg, mmc_obj->base + SDMMC_CTRL);
	} else {
		mmc_obj->prev_blksz = data->blksize;
	}
	TRACE_DEBUG("%s, end\n", __func__);
}
int ark_mmc_wait_card_idle(struct ark_mmc_obj *mmc_obj)
{
	uint32_t tick, timeout;

	tick = xTaskGetTickCount();
	timeout = tick + configTICK_RATE_HZ / 2; /* 500ms */
	while (MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY) {
		tick = xTaskGetTickCount();
		if (tick > timeout)
			return -1;
	}
	return 0;
}
static int ark_mmc_get_response(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd)
{
	int i;
	uint32_t tick, timeout, status;
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

	cmd->resp[0] = 0;
	cmd->resp[1] = 0;
	cmd->resp[2] = 0;
	cmd->resp[3] = 0;

	tick = xTaskGetTickCount();
	timeout = tick + configTICK_RATE_HZ / 2; /* 500ms */
	//fixme: spin_lock_irqsave?
	do {
		status = MMC_GetRawInterrupt(mmc_obj);
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			TRACE_DEBUG("ERROR: %s, get response timeout (cmd not accepted by card), RINTSTS: 0x%x, cmd: %d\n",
				    __func__, status, cmd->cmd_code);
			return -1;
		}
	} while (!(status & SDMMC_INT_CMD_DONE));
	MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE);

	for (i = 0; i < 4; i++) {
		if (resp_type(cmd) == RESP_R2) {
			/* long response: RESP3..RESP0 hold bits 127..0 */
			cmd->resp[i] = MMC_GetResponse(mmc_obj, 3 - i);
			//fixme: R2 needed a short delay here with some UHS cards; cause unknown
			//vTaskDelay(configTICK_RATE_HZ / 100); /* 1ms */
		} else {
			cmd->resp[i] = MMC_GetResponse(mmc_obj, i);
		}
	}
	TRACE_DEBUG("resp: 0x%x, 0x%x, 0x%x, 0x%x\n", cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & SDMMC_INT_RTO) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RTO);
		TRACE_DEBUG("ERROR: %s, response timeout, RINTSTS: 0x%x\n", __func__, status);
		return -1;
	} else if (status & (SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR)) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR);
		printf("ERROR: %s, response error or response CRC error, RINTSTS: 0x%x\n", __func__, status);
		return -1;
	}
	return 0;
}
static int ark_mmc_start_transfer(struct mmc_driver *mmc_drv)
{
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	struct mmcsd_cmd *cmd = mmc_drv->cmd;
	struct mmcsd_data *data = NULL;
	int ret;
	uint32_t interrupt, status, reg;
	uint32_t timeout;

	if (cmd)
		data = cmd->data;
	if (!data)
		return 0;

	TRACE_DEBUG("%s, start\n", __func__);
	//fixme: spin_lock_irqsave(&host->lock, flags);
	if (!mmc_obj->using_dma) {
		/* FIFO mode: open the data interrupts */
		reg = MMC_GetInterruptMask(mmc_obj);
		reg |= SDMMC_INT_STATUS_DATA;
		MMC_SetInterruptMask(mmc_obj, reg);
	}
	//fixme: spin_unlock_irqrestore(&host->lock, flags);

	/* 1s base plus 100ms per KiB, i.e. assume at least 10 KiB/s */
	timeout = configTICK_RATE_HZ + pdMS_TO_TICKS(data->blks * data->blksize * 100 / 1024);
	ret = xQueueReceive(mmc_obj->transfer_completion, NULL, timeout);

	if (mmc_obj->using_dma) {
		if (mmc_obj->dummy_buffer_used) {
			if (data->flags & DATA_DIR_WRITE) {
				if (mmc_obj->tx_dummy_buffer) {
					vPortFree(mmc_obj->tx_dummy_buffer);
					mmc_obj->tx_dummy_buffer = NULL;
				}
			} else if (data->flags & DATA_DIR_READ) {
				if (mmc_obj->rx_dummy_buffer) {
					/* copy the bounced data back to the caller's buffer */
					memcpy(data->buf, mmc_obj->rx_dummy_buffer, data->blks * data->blksize);
					vPortFree(mmc_obj->rx_dummy_buffer);
					mmc_obj->rx_dummy_buffer = NULL;
				}
			}
		}
	} else {
		reg = MMC_GetInterruptMask(mmc_obj);
		reg &= ~SDMMC_INT_STATUS_DATA;
		MMC_SetInterruptMask(mmc_obj, reg);
	}

	if (ret != pdTRUE || mmc_obj->result) {
		//fixme: error handle
		if (mmc_obj->using_dma)
			dw_mci_stop_dma(mmc_obj);
		cmd->err = ret;
		interrupt = MMC_GetRawInterrupt(mmc_obj);
		status = MMC_GetStatus(mmc_obj);
		printf("ERROR: %s, transfer timeout, ret: %d, RINTSTS: 0x%x, STATUS: 0x%x\n",
		       __func__, ret, interrupt, status);
		return -1;
	}

	data->bytes_xfered = data->blks * data->blksize;
	return 0;
}
static void ark_mmc_complete_request(struct mmc_driver *mmc_drv)
{
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

	mmc_drv->cmd = NULL;
	mmc_drv->req = NULL;
	mmc_drv->data = NULL;
	MMC_SetBlockSize(mmc_obj, 0);
	MMC_SetByteCount(mmc_obj, 0);
	mmcsd_req_complete(mmc_drv->host);
}
static void ark_mmc_request(struct mmcsd_host *host, struct mmcsd_req *req)
{
	int ret;
	struct mmc_driver *mmc_drv = host->private_data;
	struct mmcsd_cmd *cmd = req->cmd;
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

	TRACE_DEBUG("%s start\n", __func__);
	mmc_drv->req = req;
	mmc_drv->cmd = cmd;

	if (mmc_obj->transfer_completion == NULL)
		mmc_obj->transfer_completion = xQueueCreate(1, 0);
	else
		xQueueReset(mmc_obj->transfer_completion);

	ret = ark_mmc_wait_card_idle(mmc_obj);
	if (ret) {
		printf("ERROR: %s, data transfer timeout, status: 0x%x\r\n", __func__, MMC_GetStatus(mmc_obj));
		if (MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY)
			goto out;
	}

	mmc_obj->result = 0;
	ark_mmc_prepare_data(mmc_drv);
	ark_mmc_send_command(mmc_drv, cmd);
	if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1)
		vTaskDelay(pdMS_TO_TICKS(1));
	ret = ark_mmc_get_response(mmc_drv, cmd);
	if (ret) {
		cmd->err = ret;
		printf("%s, get response returned %d, cmd: %d\r\n", __func__, ret, cmd->cmd_code);
		goto out;
	}

	ark_mmc_start_transfer(mmc_drv);
	if (req->stop) {
		/* send stop command */
		TRACE_DEBUG("%s send stop\n", __func__);
		ark_mmc_send_command(mmc_drv, req->stop);
	}

out:
	ark_mmc_complete_request(mmc_drv);
	TRACE_DEBUG("%s end\n", __func__);
}
static const struct mmcsd_host_ops ark_mmc_ops = {
	.request = ark_mmc_request,
	.set_iocfg = ark_mmc_set_iocfg,
	.enable_sdio_irq = ark_mmc_enable_sdio_irq,
	.get_card_status = ark_mmc_get_card_status,
};
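/*
 * Interrupt handler: clears and dispatches command/data error flags,
 * data-over (DTO), the PIO watermark events, card detect, SDIO interrupts,
 * and the IDMAC status bits.  Completion is signalled to the requesting
 * task through the transfer_completion queue.
 */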
static void ark_mmc_interrupt(void *param)
{
	struct mmc_driver *mmc_drv = (struct mmc_driver *)param;
	struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
	struct mmcsd_cmd *cmd = mmc_drv->cmd;
	struct mmcsd_data *data = NULL;
	uint32_t status;

	if (cmd && cmd->data)
		data = cmd->data;

	status = MMC_GetUnmaskedInterrupt(mmc_obj);
	TRACE_DEBUG("unmasked interrupts: 0x%x\n", status);

	if (status & SDMMC_CMD_ERROR_FLAGS) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_CMD_ERROR_FLAGS);
		mmc_obj->result = -1;
		xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
	}
	if (status & SDMMC_DATA_ERROR_FLAGS) {
		/* if there is an error, report DATA_ERROR */
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_DATA_ERROR_FLAGS);
		mmc_obj->result = -1;
		xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
	}
	if (status & SDMMC_INT_DATA_OVER) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_DATA_OVER);
		if (data && data->flags & DATA_DIR_READ) {
			if (!mmc_obj->using_dma && data->bytes_xfered != data->blks * data->blksize)
				ark_mmc_read_pio(mmc_drv, 1);
		}
		if (!mmc_obj->using_dma)
			xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
	}
	if (status & SDMMC_INT_RXDR) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR);
		if (data && data->flags & DATA_DIR_READ)
			ark_mmc_read_pio(mmc_drv, 0);
		/* clear any RXDR/HTO raised while draining the FIFO */
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR | SDMMC_INT_HTO);
	}
	if (status & SDMMC_INT_TXDR) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_TXDR);
		if (data && data->flags & DATA_DIR_WRITE)
			ark_mmc_write_pio(mmc_drv);
	}
	if (status & SDMMC_INT_CMD_DONE)
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE);
	if (status & SDMMC_INT_CD) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CD);
		mmcsd_change_from_isr(mmc_drv->host);
	}
	if (status & SDMMC_INT_SDIO) {
		MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO);
		sdio_irq_wakeup_isr(mmc_drv->host);
	}

	if (mmc_obj->use_dma == TRANS_MODE_IDMAC) {
		/* Handle IDMAC interrupts */
		if (mmc_obj->dma_64bit_address == 1) {
			status = readl(mmc_obj->base + SDMMC_IDSTS64);
			if (MMC_IsFifoEmpty(mmc_obj) &&
			    (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) {
				writel(SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI, mmc_obj->base + SDMMC_IDSTS64);
				writel(SDMMC_IDMAC_INT_NI, mmc_obj->base + SDMMC_IDSTS64);
			}
		} else {
			status = readl(mmc_obj->base + SDMMC_IDSTS);
			if (MMC_IsFifoEmpty(mmc_obj) &&
			    (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) {
				writel(SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI, mmc_obj->base + SDMMC_IDSTS);
				writel(SDMMC_IDMAC_INT_NI, mmc_obj->base + SDMMC_IDSTS);
				xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
			}
		}
	}
}
void ark_mmc_reset(struct ark_mmc_obj *mmc_obj)
{
	sys_soft_reset(mmc_obj->softreset_id);
}

static struct ark_mmc_obj mmc0_obj = {
	.id = 0,
	.irq = SDMMC0_IRQn,
	.base = REGS_SDMMC0_BASE,
	.mmc_reset = ark_mmc_reset,
	.softreset_id = softreset_sdmmc,
	.clk_id = CLK_SDMMC0,
};

static struct ark_mmc_obj mmc1_obj = {
	.id = 1,
	.irq = SDMMC1_IRQn,
	.base = REGS_SDMMC1_BASE,
	.mmc_reset = ark_mmc_reset,
	.softreset_id = softreset_sdmmc1,
	.clk_id = CLK_SDMMC1,
};
int ark_mmc_probe(struct ark_mmc_obj *mmc_obj)
{
	struct mmc_driver *mmc_drv;
	struct mmcsd_host *host;

	TRACE_DEBUG("%s start\n", __func__);
	mmc_drv = (struct mmc_driver *)pvPortMalloc(sizeof(struct mmc_driver));
	if (!mmc_drv) {
		printf("ERROR: %s, failed to malloc mmc_drv\n", __func__);
		return -ENOMEM;
	}
	memset(mmc_drv, 0, sizeof(struct mmc_driver));
	mmc_drv->priv = mmc_obj;

	host = mmcsd_alloc_host();
	if (!host) {
		printf("ERROR: %s, failed to malloc host\n", __func__);
		vPortFree(mmc_drv);
		return -ENOMEM;
	}
	host->ops = &ark_mmc_ops;
	host->freq_min = MMC_FEQ_MIN;
	host->freq_max = MMC_FEQ_MAX;
	host->valid_ocr = VDD_32_33 | VDD_33_34;
	host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_BUSWIDTH_4;
	host->max_blk_size = 512;
	//fixme: max_blk_count?
	host->max_blk_count = 2048;
	host->private_data = mmc_drv;
	mmc_drv->host = host;

	MMC_Init(mmc_obj);
	if (mmc_obj->use_dma == TRANS_MODE_IDMAC) {
		host->max_segs = DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
		host->max_blk_size = 65535;
		host->max_seg_size = 0x1000;
		host->max_req_size = host->max_seg_size * host->max_segs;
		host->max_blk_count = host->max_req_size / 512;
	}

	request_irq(mmc_obj->irq, 0, ark_mmc_interrupt, mmc_drv);

	if (mmcsd_dev_is_sdio_card(mmc_obj->id) == 1)
		ark_mmc_enable_sdio_irq(host, 1);

	if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) {
		if (ark_mmc_get_card_status(host))
			mmcsd_change(host);
	} else {
		mmcsd_change(host);
	}
	TRACE_DEBUG("%s end\n", __func__);
	return 0;
}
int mmc_init(void)
{
#ifdef SDMMC0_SUPPORT
	sema_take(SEMA_GATE_SDMMC0, portMAX_DELAY);
	ark_mmc_probe(&mmc0_obj);
	sema_give(SEMA_GATE_SDMMC0);
#endif
#ifdef SDMMC1_SUPPORT
	ark_mmc_probe(&mmc1_obj);
#endif
	return 0;
}
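/*
 * Usage sketch (illustrative only, not part of this driver): a board would
 * typically call mmc_init() once from a startup task after the RTOS is
 * running, e.g.
 *
 *     void board_init_task(void *arg)   // hypothetical startup task
 *     {
 *         mmc_init();                   // probes SDMMC0/SDMMC1 per config
 *     }
 */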
#endif /* SDMMC_SUPPORT */