/* sdmmc.c */
#include "FreeRTOS.h"
#include "chip.h"
#include "board.h"
#include "mmc.h"
#include "sdio.h"
#include "sdmmc.h"
#include "mmcsd_core.h"
#include "os_adapt.h"
#include "sema.h"

#ifdef SDMMC_SUPPORT

#define DW_MCI_DESC_DATA_LENGTH 0x1000

struct idmac_desc {
    __le32 des0; /* Control descriptor */
#define IDMAC_DES0_DIC BIT(1)
#define IDMAC_DES0_LD BIT(2)
#define IDMAC_DES0_FD BIT(3)
#define IDMAC_DES0_CH BIT(4)
#define IDMAC_DES0_ER BIT(5)
#define IDMAC_DES0_CES BIT(30)
#define IDMAC_DES0_OWN BIT(31)
    __le32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
    ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
    __le32 des2; /* Buffer 1 physical address */
    __le32 des3; /* Buffer 2 physical address */
};
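/*
 * Illustrative sanity check (editor's addition, assuming a C11-capable
 * toolchain): the 32-bit IDMAC descriptor above is four little-endian words,
 * so any padding would break the descriptor-ring arithmetic used below.
 */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
_Static_assert(sizeof(struct idmac_desc) == 16,
               "32-bit IDMAC descriptors must be exactly 16 bytes");
#endif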
static inline uint32_t MMC_GetWaterlevel(struct ark_mmc_obj *mmc_obj)
{
    return (readl(mmc_obj->base + SDMMC_STATUS) >> 17) & 0x1fff;
}

static inline uint32_t MMC_GetStatus(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_STATUS);
}

static inline uint32_t MMC_GetRawInterrupt(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_RINTSTS);
}

static inline uint32_t MMC_GetUnmaskedInterrupt(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_MINTSTS);
}

static inline uint32_t MMC_ClearRawInterrupt(struct ark_mmc_obj *mmc_obj, uint32_t interrupts)
{
    return writel(interrupts, mmc_obj->base + SDMMC_RINTSTS);
}

static inline uint32_t MMC_GetInterruptMask(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_INTMASK);
}

static inline uint32_t MMC_SetInterruptMask(struct ark_mmc_obj *mmc_obj, uint32_t mask)
{
    return writel(mask, mmc_obj->base + SDMMC_INTMASK);
}

static inline void MMC_SetByteCount(struct ark_mmc_obj *mmc_obj, uint32_t bytes)
{
    writel(bytes, mmc_obj->base + SDMMC_BYTCNT);
}

static inline void MMC_SetBlockSize(struct ark_mmc_obj *mmc_obj, uint32_t size)
{
    writel(size, mmc_obj->base + SDMMC_BLKSIZ);
}

static inline uint32_t MMC_GetResponse(struct ark_mmc_obj *mmc_obj, int resp_num)
{
    return readl(mmc_obj->base + SDMMC_RESP0 + resp_num * 4);
}

static inline uint32_t MMC_IsFifoEmpty(struct ark_mmc_obj *mmc_obj)
{
    return (readl(mmc_obj->base + SDMMC_STATUS) >> 2) & 0x1;
}

/*
static inline uint32_t MMC_IsDataStateBusy(struct ark_mmc_obj *mmc_obj)
{
    return (readl(mmc_obj->base + SDMMC_STATUS) >> 10) & 0x1;
}
*/
int MMC_UpdateClockRegister(struct ark_mmc_obj *mmc_obj, int div)
{
    uint32_t tick, timeout;
    int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);

    tick = xTaskGetTickCount();
    timeout = tick + configTICK_RATE_HZ / 10; /* 100ms in total */

    /* disable clock */
    writel(0, mmc_obj->base + SDMMC_CLKENA);
    writel(0, mmc_obj->base + SDMMC_CLKSRC);

    /* inform CIU */
    writel(0, mmc_obj->base + SDMMC_CMDARG);
    wmb(); /* drain writebuffer */
    writel((1u << 31) | (1u << 21) | (1u << 13), mmc_obj->base + SDMMC_CMD);
    while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
        if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
            printf("ERROR: %s, card out\n", __func__);
            return -1;
        }
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            printf("ERROR: %s, update clock timeout\n", __func__);
            return -1;
        }
    }

    /* set clock to desired speed */
    writel(div, mmc_obj->base + SDMMC_CLKDIV);

    /* inform CIU */
    writel(0, mmc_obj->base + SDMMC_CMDARG);
    wmb(); /* drain writebuffer */
    writel((1u << 31) | (1u << 21) | (1u << 13), mmc_obj->base + SDMMC_CMD);
    while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
        if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
            printf("ERROR: %s, card out\n", __func__);
            return -1;
        }
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            printf("ERROR: %s, update clock timeout\n", __func__);
            return -1;
        }
    }

    /* enable clock (low-power mode) */
    writel(0x10001, mmc_obj->base + SDMMC_CLKENA);

    /* inform CIU */
    writel((1u << 31) | (1u << 21), mmc_obj->base + SDMMC_CMD);
    while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
        if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
            printf("ERROR: %s, card out\n", __func__);
            return -1;
        }
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            printf("ERROR: %s, update clock timeout\n", __func__);
            return -1;
        }
    }
    return 0;
}
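/*
 * Minimal usage sketch (hypothetical helper, not part of this driver's API):
 * the CLKDIV field divides the controller input clock by 2 * div, so a
 * caller derives the divider from the clock tree exactly as
 * ark_mmc_set_iocfg() does later in this file.
 */
static inline int mmc_set_card_clock(struct ark_mmc_obj *mmc_obj, uint32_t target_hz)
{
    uint32_t clksrc = ulClkGetRate(mmc_obj->clk_id); /* controller input clock */
    uint32_t div = clksrc / target_hz / 2;           /* card clock = clksrc / (2 * div) */

    return MMC_UpdateClockRegister(mmc_obj, div);
}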
int MMC_SetCardWidth(struct ark_mmc_obj *mmc_obj, int width)
{
    switch (width) {
    case SDMMC_CTYPE_1BIT:
        writel(0, mmc_obj->base + SDMMC_CTYPE);
        break;
    case SDMMC_CTYPE_4BIT:
        writel(1, mmc_obj->base + SDMMC_CTYPE);
        break;
    default:
        printf("ERROR: %s, card width %d is not supported\n", __func__, width);
        return -1;
    }
    return 0;
}
int MMC_SendCommand(struct ark_mmc_obj *mmc_obj, uint32_t cmd, uint32_t arg, uint32_t flags)
{
    uint32_t tick, timeout;

    tick = xTaskGetTickCount();
    timeout = tick + configTICK_RATE_HZ; /* 1s */

    writel(arg, mmc_obj->base + SDMMC_CMDARG);
    flags |= (1u << 31) | (1u << 29) | cmd;
    writel(flags, mmc_obj->base + SDMMC_CMD);
    while (readl(mmc_obj->base + SDMMC_CMD) & SDMMC_CMD_START) {
        if ((mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) &&
            (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
            printf("ERROR: %s, card removed while polling SDMMC_CMD start bit\n", __func__);
            return -1;
        }
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            printf("ERROR: %s, send cmd timeout\n", __func__);
            return -1;
        }
    }
    //fixme: check HLE_INT_STATUS
    return 0;
}
int MMC_ResetFifo(struct ark_mmc_obj *mmc_obj)
{
    uint32_t reg, tick, timeout;

    tick = xTaskGetTickCount();
    timeout = tick + configTICK_RATE_HZ / 10; /* 100ms */

    reg = readl(mmc_obj->base + SDMMC_CTRL);
    reg |= SDMMC_CTRL_FIFO_RESET;
    writel(reg, mmc_obj->base + SDMMC_CTRL);
    /* wait until the FIFO reset finishes */
    while (readl(mmc_obj->base + SDMMC_CTRL) & SDMMC_CTRL_FIFO_RESET) {
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            printf("ERROR: %s, FIFO reset timeout\n", __func__);
            return -1;
        }
    }
    return 0;
}
int MMC_Reset(struct ark_mmc_obj *mmc_obj)
{
    uint32_t reg, tick, timeout;

    reg = readl(mmc_obj->base + SDMMC_CTRL);
    reg |= SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET;
    writel(reg, mmc_obj->base + SDMMC_CTRL);

    tick = xTaskGetTickCount();
    timeout = tick + configTICK_RATE_HZ / 10; /* 100ms */
    while (readl(mmc_obj->base + SDMMC_CTRL) &
           (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET)) {
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            printf("ERROR: %s, CTRL dma|fifo|ctrl reset timeout\n", __func__);
            return -1;
        }
    }
    return 0;
}
#define DW_MCI_DMA_THRESHOLD 32

/* DMA interface functions */
static void dw_mci_stop_dma(struct ark_mmc_obj *mmc_obj)
{
    if (mmc_obj->using_dma) {
        mmc_obj->dma_ops->stop(mmc_obj);
        mmc_obj->dma_ops->cleanup(mmc_obj);
    }
    /* Data transfer was stopped by the interrupt handler */
    //set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj);

static void dw_mci_dmac_complete_callback(void *param, unsigned int mask)
{
    struct ark_mmc_obj *mmc_obj = param;
    //struct mmcsd_data *data = mmc_obj->data;

    dev_vdbg(mmc_obj->dev, "DMA complete\n");
    mmc_obj->dma_ops->cleanup(mmc_obj);
    xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
    /*
     * If the card was removed, data will be NULL. No point in trying to
     * send the stop command or waiting for NBUSY in this case.
     */
    /* if (data) {
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
    } */
}
static UINT32 WaiteforDmaResetClear(UINT32 addr, UINT32 __delay_us, UINT32 timeout_us)
{
    u64 __timeout_us = timeout_us;
    UINT32 tmp, val;

    while (__timeout_us) {
        val = readl(addr);
        tmp = (val >> 2) & 0x1;
        if (tmp == 0)
            return 0;
        udelay(__delay_us);
        __timeout_us--; /* decrement was missing: the loop never timed out */
    }
    return 1;
}

static UINT32 WaiteforDmaOwnClear(u32 addr, UINT32 __delay_us, UINT32 timeout_us)
{
    u64 __timeout_us = timeout_us;
    UINT32 tmp, val;

    while (__timeout_us) {
        val = readl(addr);
        tmp = (val >> 31) & 0x1;
        if (tmp == 0)
            return 0;
        udelay(__delay_us);
        __timeout_us--;
    }
    return 1;
}
static bool dw_mci_ctrl_reset(struct ark_mmc_obj *mmc_obj, u32 reset)
{
    u32 ctrl;

    ctrl = readl(mmc_obj->base + SDMMC_CTRL);
    ctrl |= reset;
    writel(ctrl, mmc_obj->base + SDMMC_CTRL);

    /* wait till resets clear */
    if (WaiteforDmaResetClear(mmc_obj->base + SDMMC_CTRL, 1, 500 * USEC_PER_MSEC)) {
        printf("Timeout resetting block (ctrl reset %#x)\n",
               readl(mmc_obj->base + SDMMC_CTRL));
        return false;
    }
    return true;
}

static void dw_mci_idmac_reset(struct ark_mmc_obj *mmc_obj)
{
    u32 bmod = readl(mmc_obj->base + SDMMC_BMOD);

    /* Software reset of DMA */
    bmod |= SDMMC_IDMAC_SWRESET;
    writel(bmod, mmc_obj->base + SDMMC_BMOD);
}
static int dw_mci_idmac_init(struct ark_mmc_obj *mmc_obj)
{
    int i;
    unsigned int ring_size = 0;

    if (mmc_obj->dma_64bit_address == 1) {
#if 0
        struct idmac_desc_64addr *p;

        /* Number of descriptors in the ring buffer */
        ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

        /* Forward link the descriptor list */
        for (i = 0, p = mmc_obj->sg_cpu; i < ring_size - 1; i++, p++) {
            p->des6 = (mmc_obj->sg_dma +
                       (sizeof(struct idmac_desc_64addr) * (i + 1))) & 0xffffffff;
            p->des7 = (u64)(mmc_obj->sg_dma +
                       (sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32;
            /* Initialize reserved and buffer size fields to "0" */
            p->des0 = 0;
            p->des1 = 0;
            p->des2 = 0;
            p->des3 = 0;
        }

        /* Set the last descriptor as the end-of-ring descriptor */
        p->des6 = mmc_obj->sg_dma & 0xffffffff;
        p->des7 = (u64)mmc_obj->sg_dma >> 32;
        p->des0 = IDMAC_DES0_ER;
#endif
    } else {
        struct idmac_desc *p;

        /* Number of descriptors in the ring buffer */
        ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

        /* Forward link the descriptor list */
        for (i = 0, p = mmc_obj->sg_cpu; i < ring_size - 1; i++, p++) {
            p->des3 = cpu_to_le32(mmc_obj->sg_dma +
                                  (sizeof(struct idmac_desc) * (i + 1)));
            p->des0 = 0;
            p->des1 = 0;
        }

        /* Set the last descriptor as the end-of-ring descriptor */
        p->des3 = cpu_to_le32(mmc_obj->sg_dma);
        p->des0 = cpu_to_le32(IDMAC_DES0_ER);
    }

    dw_mci_idmac_reset(mmc_obj);

    if (mmc_obj->dma_64bit_address == 1) {
        /* Mask out interrupts - get Tx & Rx complete only */
        writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS64);
        writel(SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI,
               mmc_obj->base + SDMMC_IDINTEN64);
        /* Set the descriptor base address */
        writel(mmc_obj->sg_dma & 0xffffffff, mmc_obj->base + SDMMC_DBADDRL);
        writel((u64)mmc_obj->sg_dma >> 32, mmc_obj->base + SDMMC_DBADDRU);
    } else {
        /* Mask out interrupts - get Tx & Rx complete only */
        writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS);
        writel(SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI,
               mmc_obj->base + SDMMC_IDINTEN);
        /* Set the descriptor base address */
        writel(mmc_obj->sg_dma, mmc_obj->base + SDMMC_DBADDR);
    }
    return 0;
}
static inline int dw_mci_prepare_desc32(struct ark_mmc_obj *mmc_obj,
                                        struct mmcsd_data *data)
{
    unsigned int desc_len;
    struct idmac_desc *desc_first, *desc_last, *desc;
    u32 dma_address;
    int i;
    unsigned int sg_len;
    u32 mem_addr;
    unsigned int length;
    unsigned int leftsize;

    dma_address = (u32)data->buf;
    if (dma_address & (ARCH_DMA_MINALIGN - 1)) {
        /* unaligned buffer: bounce through a cache-line-aligned dummy buffer */
        if (data->flags & DATA_DIR_WRITE) {
            mmc_obj->tx_dummy_buffer = pvPortMalloc(data->blks * data->blksize);
            if (!mmc_obj->tx_dummy_buffer)
                return -ENOMEM;
            memcpy(mmc_obj->tx_dummy_buffer, data->buf, data->blks * data->blksize);
            dma_address = (u32)mmc_obj->tx_dummy_buffer;
        } else if (data->flags & DATA_DIR_READ) {
            mmc_obj->rx_dummy_buffer = pvPortMalloc(data->blks * data->blksize);
            if (!mmc_obj->rx_dummy_buffer)
                return -ENOMEM;
            dma_address = (u32)mmc_obj->rx_dummy_buffer;
        }
        mmc_obj->dummy_buffer_used = 1;
    } else {
        mmc_obj->dummy_buffer_used = 0;
    }

    desc_first = desc_last = desc = mmc_obj->sg_cpu;
    leftsize = data->blks * data->blksize;

    if (data->blks * data->blksize < DW_MCI_DESC_DATA_LENGTH) {
        sg_len = 1;
        length = data->blks * data->blksize;
    } else {
        sg_len = (data->blks * data->blksize % DW_MCI_DESC_DATA_LENGTH) ?
                 ((data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH) + 1) :
                 data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH;
        length = DW_MCI_DESC_DATA_LENGTH;
    }

    mem_addr = VIRT_TO_PHY(dma_address);

    for (i = 0; i < sg_len; i++) {
        if (sg_len > 1) {
            length = (leftsize >= DW_MCI_DESC_DATA_LENGTH) ?
                     DW_MCI_DESC_DATA_LENGTH : leftsize;
            if (length == 0)
                break;
        }

        for (; length; desc++) {
            desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
                       length : DW_MCI_DESC_DATA_LENGTH;
            length -= desc_len;
            leftsize -= desc_len;

            /*
             * Wait for the former clear OWN bit operation
             * of IDMAC to make sure that this descriptor
             * isn't still owned by IDMAC as IDMAC's write
             * ops and CPU's read ops are asynchronous.
             */
            if (WaiteforDmaOwnClear((UINT32)(&desc->des0), 10, 10)) {
                printf(">>>>>IDMA OWN TIME OUT!!!\n");
                goto err_own_bit;
            }

            /*
             * Set the OWN bit and disable interrupts
             * for this descriptor
             */
            desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
                                     IDMAC_DES0_DIC |
                                     IDMAC_DES0_CH);
            /* Buffer length */
            IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
            /* Physical address to DMA to/from */
            desc->des2 = cpu_to_le32(mem_addr);
            /* Update physical address for the next desc */
            mem_addr += desc_len;
            /* Save pointer to the last descriptor */
            desc_last = desc;
        }
    }

    /* Set first descriptor */
    desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
    /* Set last descriptor */
    desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
    desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

    mmc_obj->data = data;
    return 0;

err_own_bit:
    /* restore the descriptor chain as it's polluted */
    dev_dbg(mmc_obj->dev, "descriptor is still owned by IDMAC.\n");
    memset(mmc_obj->sg_cpu, 0, DESC_RING_BUF_SZ);
    dw_mci_idmac_init(mmc_obj);
    return -EINVAL;
}
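/*
 * Worked example (illustrative): with DW_MCI_DESC_DATA_LENGTH = 0x1000, a
 * 64 KiB transfer (128 blocks of 512 bytes) spans 16 descriptors. A
 * hypothetical helper with the same rounding as the sg_len computation above:
 */
static inline unsigned int idmac_desc_count(unsigned int bytes)
{
    /* round up to whole 4 KiB descriptors; short transfers still use one */
    return bytes ? (bytes + DW_MCI_DESC_DATA_LENGTH - 1) / DW_MCI_DESC_DATA_LENGTH : 1;
}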
static int dw_mci_idmac_start_dma(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
    u32 temp;
    int ret = 0;

    if (mmc_obj->dma_64bit_address == 1)
        ;//ret = dw_mci_prepare_desc64(host, mmc_obj->data, sg_len);
    else
        ret = dw_mci_prepare_desc32(mmc_obj, data);
    if (ret)
        goto out;

    /* drain writebuffer */
    wmb();

    /* Make sure to reset DMA in case we did PIO before this */
    dw_mci_ctrl_reset(mmc_obj, SDMMC_CTRL_DMA_RESET);
    dw_mci_idmac_reset(mmc_obj);

    /* Select IDMAC interface */
    temp = readl(mmc_obj->base + SDMMC_CTRL);
    temp |= SDMMC_CTRL_USE_IDMAC;
    writel(temp, mmc_obj->base + SDMMC_CTRL);

    /* drain writebuffer */
    wmb();

    /* Enable the IDMAC */
    temp = readl(mmc_obj->base + SDMMC_BMOD);
    temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
    writel(temp, mmc_obj->base + SDMMC_BMOD);

    if (mmc_obj->using_dma) {
        if (mmc_obj->dummy_buffer_used) {
            if (data->flags & DATA_DIR_WRITE) {
                if (mmc_obj->tx_dummy_buffer)
                    CP15_clean_dcache_for_dma((u32)mmc_obj->tx_dummy_buffer,
                        (u32)mmc_obj->tx_dummy_buffer + data->blks * data->blksize);
            } else if (data->flags & DATA_DIR_READ) {
                if (mmc_obj->rx_dummy_buffer)
                    CP15_flush_dcache_for_dma((u32)mmc_obj->rx_dummy_buffer,
                        (u32)mmc_obj->rx_dummy_buffer + data->blks * data->blksize);
            }
        } else {
            /* Flush cache before write */
            if (data->flags & DATA_DIR_WRITE) {
                CP15_clean_dcache_for_dma((u32)data->buf,
                    (u32)data->buf + data->blks * data->blksize);
            }
            /* Invalidate cache before read */
            else if (data->flags & DATA_DIR_READ) {
                CP15_flush_dcache_for_dma((u32)data->buf,
                    (u32)data->buf + data->blks * data->blksize);
            }
        }
    }

    /* Start it running */
    writel(1, mmc_obj->base + SDMMC_PLDMND);
out:
    return ret;
}
static void dw_mci_idmac_stop_dma(struct ark_mmc_obj *mmc_obj)
{
    u32 temp;

    /* Disable and reset the IDMAC interface */
    temp = readl(mmc_obj->base + SDMMC_CTRL);
    temp &= ~SDMMC_CTRL_USE_IDMAC;
    temp |= SDMMC_CTRL_DMA_RESET;
    writel(temp, mmc_obj->base + SDMMC_CTRL);

    /* Stop the IDMAC running */
    temp = readl(mmc_obj->base + SDMMC_BMOD);
    temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
    temp |= SDMMC_IDMAC_SWRESET;
    writel(temp, mmc_obj->base + SDMMC_BMOD);
}
void dw_mci_dmac_complete_dma(void *arg)
{
#if 0
    struct dw_mci *host = arg;
    struct mmc_data *data = host->data;

    dev_vdbg(host->dev, "DMA complete\n");

    if ((host->use_dma == TRANS_MODE_EDMAC) &&
        data && (data->flags & MMC_DATA_READ))
        /* Invalidate cache after read */
        dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
                            data->sg,
                            data->sg_len,
                            DMA_FROM_DEVICE);

    host->dma_ops->cleanup(host);

    /*
     * If the card was removed, data will be NULL. No point in trying to
     * send the stop command or waiting for NBUSY in this case.
     */
    if (data) {
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
    }
#endif
    return;
}
static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj)
{
    /*
     * Intentionally a no-op: the descriptor ring (sg_cpu) is allocated once
     * in dw_mci_init_dma() and reused for every transfer, so it must not be
     * freed here.
     */
    (void)mmc_obj;
}
static struct dw_mci_dma_ops dw_mci_idmac_ops = {
    .init = dw_mci_idmac_init,
    .start = dw_mci_idmac_start_dma,
    .stop = dw_mci_idmac_stop_dma,
    // .complete = dw_mci_dmac_complete_dma,
    .cleanup = dw_mci_dma_cleanup,
};
static void dw_mci_init_dma(struct ark_mmc_obj *mmc_obj)
{
    unsigned int addr_config = 0;

    /*
     * Check transfer mode from HCON[17:16]
     * Clear the ambiguous description of dw_mmc databook:
     * 2b'00: No DMA Interface -> Actually means using Internal DMA block
     * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
     * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
     * 2b'11: Non DW DMA Interface -> pio only
     * Compared to DesignWare DMA Interface, Generic DMA Interface has a
     * simpler request/acknowledge handshake mechanism and both of them
     * are regarded as external dma master for dw_mmc.
     */
    mmc_obj->use_dma = SDMMC_GET_TRANS_MODE(readl(mmc_obj->base + SDMMC_HCON));
    if (mmc_obj->use_dma == DMA_INTERFACE_IDMA) {
        mmc_obj->use_dma = TRANS_MODE_IDMAC;
    } else {
        goto no_dma;
    }

    /*
     * Determine which DMA interface to use: check the ADDR_CONFIG bit in
     * HCON to find the IDMAC address bus width.
     */
    addr_config = SDMMC_GET_ADDR_CONFIG(readl(mmc_obj->base + SDMMC_HCON));
    if (addr_config == 1) {
        /* host supports IDMAC in 64-bit address mode (not enabled here,
         * so dma_64bit_address keeps its zero-initialized value) */
    } else {
        /* host supports IDMAC in 32-bit address mode */
        mmc_obj->dma_64bit_address = 0;
        dev_info(mmc_obj->dev, "IDMAC supports 32-bit address mode.\n");
    }

    /* Alloc memory for sg translation */
    mmc_obj->sg_cpu = pvPortMalloc(DESC_RING_BUF_SZ + ARCH_DMA_MINALIGN);
    if (!mmc_obj->sg_cpu) {
        printf("%s: could not alloc DMA memory\n", __func__);
        goto no_dma;
    }
    mmc_obj->sg_dma = VIRT_TO_PHY((u32)mmc_obj->sg_cpu);
    mmc_obj->sg_cpu = (u32 *)PHY_TO_UNCACHED_VIRT((u32)mmc_obj->sg_cpu);
    printf("%s: sg_dma = 0x%x, sg_cpu = 0x%x\n", __func__,
           (u32)mmc_obj->sg_dma, (u32)mmc_obj->sg_cpu);
    if (!mmc_obj->sg_cpu) {
        dev_err(mmc_obj->dev, "%s: could not alloc DMA memory\n", __func__);
        goto no_dma;
    }

    mmc_obj->dma_ops = &dw_mci_idmac_ops;
    dev_info(mmc_obj->dev, "Using internal DMA controller.\n");

    if (mmc_obj->dma_ops->init && mmc_obj->dma_ops->start &&
        mmc_obj->dma_ops->stop && mmc_obj->dma_ops->cleanup) {
        if (mmc_obj->dma_ops->init(mmc_obj)) {
            printf("%s: Unable to initialize DMA Controller.\n", __func__);
            goto no_dma;
        }
    } else {
        printf("DMA initialization not found.\n");
        goto no_dma;
    }
    return;

no_dma:
    dev_info(mmc_obj->dev, "Using PIO mode.\n");
    mmc_obj->use_dma = TRANS_MODE_PIO;
}
void MMC_Init(struct ark_mmc_obj *mmc_obj)
{
    uint32_t reg;

    if (mmc_obj->mmc_reset)
        mmc_obj->mmc_reset(mmc_obj);
    vClkEnable(mmc_obj->clk_id);
    MMC_Reset(mmc_obj);
    dw_mci_init_dma(mmc_obj);

    MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_ALL);
    MMC_SetInterruptMask(mmc_obj, 0x0);

    reg = readl(mmc_obj->base + SDMMC_CTRL);
    reg |= SDMMC_CTRL_INT_ENABLE;
    writel(reg, mmc_obj->base + SDMMC_CTRL);

    /* set timeout param */
    writel(0xffffffff, mmc_obj->base + SDMMC_TMOUT);

    /* probe the FIFO depth from the power-on RX watermark, then set FIFOTH */
    reg = readl(mmc_obj->base + SDMMC_FIFOTH);
    reg = ((reg >> 16) & 0xfff) + 1;
    mmc_obj->fifoth_val = SDMMC_SET_FIFOTH(0x3, reg / 2 - 1, reg / 2);
    writel(mmc_obj->fifoth_val, mmc_obj->base + SDMMC_FIFOTH);

    MMC_SetInterruptMask(mmc_obj, SDMMC_INT_CD);
}
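/*
 * Worked example (illustrative, following the dw_mmc convention that the
 * power-on RX watermark field equals FIFO depth - 1): a reset RX_WMark of
 * 0x1f gives reg = 32, i.e. a 32-word FIFO, so the watermarks above become
 * RX = 15 and TX = 16 with an MSIZE code of 0x3.
 */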
static int ark_mmc_write_pio(struct mmc_driver *mmc_drv)
{
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = NULL;
    uint32_t status;
    uint32_t len;
    uint32_t remain, fcnt;
    uint32_t *buf;
    int i;
    int hotpluge_support;

    if (cmd)
        data = cmd->data;
    if (!data) {
        TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__);
        return -EIO;
    }

    hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
    do {
        if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
            break;
        if (data->blks * data->blksize == data->bytes_xfered)
            break;
        buf = data->buf + data->bytes_xfered / 4;
        remain = data->blks * data->blksize - data->bytes_xfered;
        do {
            fcnt = (SDMMC_FIFO_DEPTH - MMC_GetWaterlevel(mmc_obj)) * 4;
            len = configMIN(remain, fcnt);
            if (!len)
                break;
            for (i = 0; i < len / 4; i++) {
                writel(*buf++, mmc_obj->base + SDMMC_FIFO);
            }
            data->bytes_xfered += len;
            remain -= len;
        } while (remain);
        status = readl(mmc_obj->base + SDMMC_MINTSTS);
        /* TXDR is cleared through RINTSTS (MINTSTS is read-only),
         * matching the read path below */
        writel(SDMMC_INT_TXDR, mmc_obj->base + SDMMC_RINTSTS);
    } while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
    return 0;
}
static int ark_mmc_read_pio(struct mmc_driver *mmc_drv, bool dto)
{
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = NULL;
    u32 status;
    unsigned int len;
    unsigned int remain, fcnt;
    uint32_t *buf;
    int i;
    int hotpluge_support;

    if (cmd)
        data = cmd->data;
    if (!data) {
        TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__);
        return -EIO;
    }

    hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
    do {
        if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
            break;
        if (data->blks * data->blksize == data->bytes_xfered)
            break;
        buf = data->buf + data->bytes_xfered / 4;
        remain = data->blks * data->blksize - data->bytes_xfered;
        do {
            fcnt = MMC_GetWaterlevel(mmc_obj) * 4;
            len = configMIN(remain, fcnt);
            if (!len)
                break;
            for (i = 0; i < len / 4; i++) {
                *buf++ = readl(mmc_obj->base + SDMMC_FIFO);
            }
            data->bytes_xfered += len;
            remain -= len;
        } while (remain);
        status = readl(mmc_obj->base + SDMMC_MINTSTS);
        writel(SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS);
        /* if RXDR is set, read again */
    } while ((status & SDMMC_INT_RXDR) ||
             (dto && MMC_GetWaterlevel(mmc_obj)));
    return 0;
}
static void ark_mmc_set_iocfg(struct mmcsd_host *host, struct mmcsd_io_cfg *io_cfg)
{
    uint32_t clksrc, clkdiv;
    struct mmc_driver *mmc_drv = host->private_data;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    unsigned int regs;
    int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);

    /* maybe switch power to the card */
    switch (io_cfg->power_mode) {
    case MMCSD_POWER_OFF:
        if (hotpluge_support == 1) {
            regs = readl(mmc_obj->base + SDMMC_PWREN);
            regs &= ~(1 << 0);
            writel(regs, mmc_obj->base + SDMMC_PWREN);
        }
        break;
    case MMCSD_POWER_UP:
        if (hotpluge_support == 1) {
            regs = readl(mmc_obj->base + SDMMC_PWREN);
            regs |= (1 << 0);
            writel(regs, mmc_obj->base + SDMMC_PWREN);
        }
        break;
    case MMCSD_POWER_ON:
        if (hotpluge_support == 1) {
            MMC_Reset(mmc_obj);
        }
        break;
    default:
        printf("ERROR: %s, unknown power_mode %d\n", __func__, io_cfg->power_mode);
        break;
    }

    //fixme: read from PMU
    //why io_cfg->clock == 0 ?
    if (io_cfg->clock) {
        clksrc = ulClkGetRate(mmc_obj->clk_id);
        clkdiv = clksrc / io_cfg->clock / 2;
        MMC_UpdateClockRegister(mmc_obj, clkdiv);
        TRACE_DEBUG("io_cfg->clock: %lu, clock in: %lu, clkdiv: %d\n",
                    io_cfg->clock, clksrc, clkdiv);
    } else {
        writel(0, mmc_obj->base + SDMMC_CLKENA);
    }

    if (io_cfg->bus_width == MMCSD_BUS_WIDTH_4) {
        MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_4BIT);
        TRACE_DEBUG("set to 4-bit mode\n");
    } else {
        MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_1BIT);
        // printf("set to 1-bit mode\n");
    }
    TRACE_DEBUG("%s end\n", __func__);
}
static void ark_mmc_enable_sdio_irq(struct mmcsd_host *host, int32_t enable)
{
    struct mmc_driver *mmc_drv = host->private_data;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    uint32_t reg;

    TRACE_DEBUG("%s start\n", __func__);
    if (enable) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO);
        reg = MMC_GetInterruptMask(mmc_obj);
        reg |= SDMMC_INT_SDIO;
        MMC_SetInterruptMask(mmc_obj, reg);
    } else {
        reg = MMC_GetInterruptMask(mmc_obj);
        reg &= ~SDMMC_INT_SDIO;
        MMC_SetInterruptMask(mmc_obj, reg);
    }
}

static int32_t ark_mmc_get_card_status(struct mmcsd_host *host)
{
    struct mmc_driver *mmc_drv = host->private_data;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

    return !(readl(mmc_obj->base + SDMMC_CDETECT) & 0x1);
}
static void ark_mmc_send_command(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd)
{
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_req *req = mmc_drv->req;
    struct mmcsd_data *data;
    int ret;
    uint32_t cmd_flags = 0;

    TRACE_DEBUG("%s, start\n", __func__);
    if (!cmd) {
        //fixme: stop dma
        printf("ERROR: %s, cmd is NULL\n", __func__);
        return;
    }
    //fixme: cmd->data or req->data
    data = cmd->data; /* only dereference cmd after the NULL check above */

    if (data) {
        cmd_flags |= SDMMC_CMD_DAT_EXP;
        /* always set data start - also set direction flag for read */
        if (data->flags & DATA_DIR_WRITE)
            cmd_flags |= SDMMC_CMD_DAT_WR;
        if (data->flags & DATA_STREAM)
            cmd_flags |= SDMMC_CMD_STRM_MODE;
    }

    if (cmd == req->stop)
        cmd_flags |= SDMMC_CMD_STOP;
    else
        cmd_flags |= SDMMC_CMD_PRV_DAT_WAIT;

    switch (resp_type(cmd)) {
    case RESP_NONE:
        break;
    case RESP_R1:
    case RESP_R5:
    case RESP_R6:
    case RESP_R7:
    case RESP_R1B:
        cmd_flags |= SDMMC_CMD_RESP_EXP;
        cmd_flags |= SDMMC_CMD_RESP_CRC;
        break;
    case RESP_R2:
        cmd_flags |= SDMMC_CMD_RESP_EXP;
        cmd_flags |= SDMMC_CMD_RESP_CRC;
        cmd_flags |= SDMMC_CMD_RESP_LONG;
        break;
    case RESP_R3:
    case RESP_R4:
        cmd_flags |= SDMMC_CMD_RESP_EXP;
        break;
    default:
        printf("ERROR: %s, unknown cmd type %x\n", __func__, resp_type(cmd));
        return;
    }

    if (cmd->cmd_code == GO_IDLE_STATE)
        cmd_flags |= SDMMC_CMD_INIT;

    /* CMD11 (opcode shared with READ_DAT_UNTIL_STOP) requests a voltage switch */
    if (cmd->cmd_code == READ_DAT_UNTIL_STOP)
        cmd_flags |= SDMMC_CMD_VOLT_SWITCH;

    TRACE_DEBUG("cmd code: %d, args: 0x%x, resp type: 0x%x, flag: 0x%x\n",
                cmd->cmd_code, cmd->arg, resp_type(cmd), cmd_flags);
    ret = MMC_SendCommand(mmc_obj, cmd->cmd_code, cmd->arg, cmd_flags);
    if (ret) {
        printf("ERROR: %s, Send command timeout, cmd: %d, status: 0x%x\n",
               __func__, cmd->cmd_code, MMC_GetStatus(mmc_obj));
    }
}
static void dw_mci_adjust_fifoth(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
    unsigned int blksz = data->blksize;
    const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
    u32 fifo_width = 4;
    u32 blksz_depth = blksz / fifo_width, fifoth_val;
    u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
    int idx = ARRAY_SIZE(mszs) - 1;

    /* PIO should skip this scenario */
    if (!mmc_obj->use_dma)
        return;

    tx_wmark = SDMMC_FIFO_DEPTH / 2;
    tx_wmark_invers = SDMMC_FIFO_DEPTH - tx_wmark;

    /*
     * MSIZE is '1',
     * if blksz is not a multiple of the FIFO width
     */
    if (blksz % fifo_width)
        goto done;

    do {
        if (!((blksz_depth % mszs[idx]) ||
              (tx_wmark_invers % mszs[idx]))) {
            msize = idx;
            rx_wmark = mszs[idx] - 1;
            break;
        }
    } while (--idx > 0);
    /*
     * If idx is '0', it won't be tried.
     * Thus, the initial values are used.
     */
done:
    fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
    writel(fifoth_val, mmc_obj->base + SDMMC_FIFOTH);
}
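/*
 * Worked example (illustrative, assuming SDMMC_FIFO_DEPTH == 32): blksize =
 * 512 with fifo_width = 4 gives blksz_depth = 128, tx_wmark = 16 and
 * tx_wmark_invers = 16. Scanning mszs[] downward, 16 is the first burst size
 * that divides both 128 and 16, so msize = 3 (the index of 16) and
 * rx_wmark = 15.
 */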
static int dw_mci_submit_data_dma(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
    u32 temp;

    mmc_obj->using_dma = 0;

    /* If we don't have a channel, we can't do DMA */
    if (!mmc_obj->use_dma)
        return -1;

    if (data->blks * data->blksize < DW_MCI_DMA_THRESHOLD ||
        data->blksize & 3 || (u32)data->buf & 3) {
        mmc_obj->dma_ops->stop(mmc_obj);
        return -1;
    }

    mmc_obj->using_dma = 1;

    temp = MMC_GetInterruptMask(mmc_obj);
    temp |= SDMMC_INT_DATA_OVER | SDMMC_INT_DATA_ERROR;
    temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
    MMC_SetInterruptMask(mmc_obj, temp);

    /* Enable the DMA interface */
    temp = readl(mmc_obj->base + SDMMC_CTRL);
    temp |= SDMMC_CTRL_DMA_ENABLE;
    writel(temp, mmc_obj->base + SDMMC_CTRL);

    /*
     * Decide the MSIZE and RX/TX watermark.
     * If the current block size is the same as the previous one,
     * there is no need to update FIFOTH.
     */
    if (mmc_obj->prev_blksz != data->blksize)
        dw_mci_adjust_fifoth(mmc_obj, data);

    if (mmc_obj->dma_ops->start(mmc_obj, data)) {
        mmc_obj->dma_ops->stop(mmc_obj);
        /* We can't do DMA, try PIO for this one */
        dev_dbg(mmc_obj->dev,
                "%s: fall back to PIO mode for current transfer\n",
                __func__);
        mmc_obj->using_dma = 0;
        return -1;
    }
    return 0;
}
static void ark_mmc_prepare_data(struct mmc_driver *mmc_drv)
{
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = cmd->data;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    uint32_t data_size;
    uint32_t reg;

    if (!data) {
        MMC_SetBlockSize(mmc_obj, 0);
        MMC_SetByteCount(mmc_obj, 0);
        return;
    }

    TRACE_DEBUG("%s, start\n", __func__);
    if (MMC_ResetFifo(mmc_obj)) {
        return;
    }

    data_size = data->blks * data->blksize;
    MMC_SetBlockSize(mmc_obj, data->blksize);
    data->bytes_xfered = 0;
    if (data_size % 4) {
        printf("ERROR: data_size should be a multiple of 4, but is %d\n", data_size);
    }
    MMC_SetByteCount(mmc_obj, data_size);
    TRACE_DEBUG("%s, set blk size: 0x%x, byte count: 0x%x\n", __func__,
                data->blksize, data_size);

    if (dw_mci_submit_data_dma(mmc_obj, data)) {
        /* fall back to PIO: clear stale TXDR/RXDR, unmask them, disable DMA */
        writel(SDMMC_INT_TXDR | SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS);
        reg = readl(mmc_obj->base + SDMMC_INTMASK);
        reg |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
        writel(reg, mmc_obj->base + SDMMC_INTMASK);
        reg = readl(mmc_obj->base + SDMMC_CTRL);
        reg &= ~SDMMC_CTRL_DMA_ENABLE;
        writel(reg, mmc_obj->base + SDMMC_CTRL);
    } else {
        mmc_obj->prev_blksz = data->blksize;
    }
    TRACE_DEBUG("%s, end\n", __func__);
}
int ark_mmc_wait_card_idle(struct ark_mmc_obj *mmc_obj)
{
    uint32_t tick, timeout;

    tick = xTaskGetTickCount();
    timeout = tick + configTICK_RATE_HZ / 2; /* 500ms */
    while (MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY) {
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            return -1;
        }
    }
    return 0;
}
static int ark_mmc_get_response(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd)
{
    int i;
    uint32_t tick, timeout, status;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

    cmd->resp[0] = 0;
    cmd->resp[1] = 0;
    cmd->resp[2] = 0;
    cmd->resp[3] = 0;

    tick = xTaskGetTickCount();
    timeout = tick + configTICK_RATE_HZ / 2; /* 500ms */
    //fixme: spin_lock_irqsave?
    do {
        status = MMC_GetRawInterrupt(mmc_obj);
        tick = xTaskGetTickCount();
        if (tick > timeout) {
            TRACE_DEBUG("ERROR: %s, get response timeout (cmd not received by card), RINTSTS: 0x%x, cmd: %d\n",
                        __func__, status, cmd->cmd_code);
            return -1;
        }
    } while (!(status & SDMMC_INT_CMD_DONE));
    MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE);

    for (i = 0; i < 4; i++) {
        if (resp_type(cmd) == RESP_R2) {
            /* long (R2) responses come back in reverse register order */
            cmd->resp[i] = MMC_GetResponse(mmc_obj, 3 - i);
            //fixme: R2 needs a short delay here with some UHS cards; find out why
            //1ms
            //vTaskDelay(configTICK_RATE_HZ / 100);
        } else {
            cmd->resp[i] = MMC_GetResponse(mmc_obj, i);
        }
    }
    TRACE_DEBUG("resp: 0x%x, 0x%x, 0x%x, 0x%x\n",
                cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

    if (status & SDMMC_INT_RTO) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RTO);
        TRACE_DEBUG("ERROR: %s, get response timeout, RINTSTS: 0x%x\n", __func__, status);
        return -1;
    } else if (status & (SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR)) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR);
        printf("ERROR: %s, response error or response crc error, RINTSTS: 0x%x\n",
               __func__, status);
        return -1;
    }
    return 0;
}
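/*
 * Illustrative helper (hypothetical, mirroring the Linux UNSTUFF_BITS
 * convention): after ark_mmc_get_response(), an R2 reply stores bits
 * [127:96] of the 128-bit response in resp[0], so a field such as a CSD
 * entry can be extracted like this:
 */
static inline uint32_t mmc_resp_bits(const uint32_t resp[4], int start, int width)
{
    const uint32_t mask = (width < 32 ? (1u << width) : 0u) - 1u;
    const int off = 3 - (start / 32);   /* resp[0] holds the highest bits */
    const int shift = start & 31;
    uint32_t val = resp[off] >> shift;

    if (shift + width > 32)             /* field crosses a word boundary */
        val |= resp[off - 1] << ((32 - shift) % 32);
    return val & mask;
}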
static int ark_mmc_start_transfer(struct mmc_driver *mmc_drv)
{
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = NULL;
    int ret;
    uint32_t interrupt, status, reg;
    uint32_t timeout;

    if (cmd)
        data = cmd->data;
    if (!data) {
        return 0;
    }

    TRACE_DEBUG("%s, start\n", __func__);
    //fixme: spin_lock_irqsave(&host->lock, flags);
    if (!mmc_obj->using_dma) {
        /* FIFO mode: open the data interrupts */
        reg = MMC_GetInterruptMask(mmc_obj);
        reg |= SDMMC_INT_STATUS_DATA;
        MMC_SetInterruptMask(mmc_obj, reg);
    }
    //fixme: spin_unlock_irqrestore(&host->lock, flags);

    /* assume a minimum throughput of 10 KB per second */
    timeout = configTICK_RATE_HZ + pdMS_TO_TICKS(data->blks * data->blksize * 100 / 1024);
    ret = xQueueReceive(mmc_obj->transfer_completion, NULL, timeout);

    if (mmc_obj->using_dma) {
        if (mmc_obj->dummy_buffer_used) {
            if (data->flags & DATA_DIR_WRITE) {
                if (mmc_obj->tx_dummy_buffer) {
                    vPortFree(mmc_obj->tx_dummy_buffer);
                    mmc_obj->tx_dummy_buffer = NULL;
                }
            } else if (data->flags & DATA_DIR_READ) {
                if (mmc_obj->rx_dummy_buffer) {
                    CP15_invalidate_dcache_for_dma((u32)mmc_obj->rx_dummy_buffer,
                        (u32)mmc_obj->rx_dummy_buffer + data->blks * data->blksize);
                    memcpy(data->buf, mmc_obj->rx_dummy_buffer, data->blks * data->blksize);
                    vPortFree(mmc_obj->rx_dummy_buffer);
                    mmc_obj->rx_dummy_buffer = NULL;
                }
            }
        } else {
            if (data->flags & DATA_DIR_READ)
                CP15_invalidate_dcache_for_dma((u32)data->buf,
                    (u32)data->buf + data->blks * data->blksize);
        }
    } else {
        reg = MMC_GetInterruptMask(mmc_obj);
        reg &= ~SDMMC_INT_STATUS_DATA;
        MMC_SetInterruptMask(mmc_obj, reg);
    }

    if (ret != pdTRUE || mmc_obj->result) {
        //fixme: error handle
        if (mmc_obj->using_dma)
            dw_mci_stop_dma(mmc_obj);
        cmd->err = ret;
        interrupt = MMC_GetRawInterrupt(mmc_obj);
        status = MMC_GetStatus(mmc_obj);
        printf("ERROR: %s, transfer timeout, ret: %d, RINTSTS: 0x%x, STATUS: 0x%x\n",
               __func__, ret, interrupt, status);
        return -1;
    }

    data->bytes_xfered = data->blks * data->blksize;
    return 0;
}
static void ark_mmc_complete_request(struct mmc_driver *mmc_drv)
{
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

    mmc_drv->cmd = NULL;
    mmc_drv->req = NULL;
    mmc_drv->data = NULL;
    MMC_SetBlockSize(mmc_obj, 0);
    MMC_SetByteCount(mmc_obj, 0);
    mmcsd_req_complete(mmc_drv->host);
}
static void ark_mmc_request(struct mmcsd_host *host, struct mmcsd_req *req)
{
    int ret;
    struct mmc_driver *mmc_drv = host->private_data;
    struct mmcsd_cmd *cmd = req->cmd;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;

    TRACE_DEBUG("%s start\n", __func__);
    mmc_drv->req = req;
    mmc_drv->cmd = cmd;

    if (mmc_obj->transfer_completion == NULL)
        mmc_obj->transfer_completion = xQueueCreate(1, 0);
    else
        xQueueReset(mmc_obj->transfer_completion);

    ret = ark_mmc_wait_card_idle(mmc_obj);
    if (ret) {
        printf("ERROR: %s, data transfer timeout, status: 0x%x\r\n",
               __func__, MMC_GetStatus(mmc_obj));
        if (MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY)
            goto out;
    }

    mmc_obj->result = 0;
    ark_mmc_prepare_data(mmc_drv);
    ark_mmc_send_command(mmc_drv, cmd);
    if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1)
        vTaskDelay(pdMS_TO_TICKS(1));
    ret = ark_mmc_get_response(mmc_drv, cmd);
    if (ret) {
        cmd->err = ret;
        printf("%s, get response returns %d, cmd: %d\r\n", __func__, ret, cmd->cmd_code);
        goto out;
    }
    ark_mmc_start_transfer(mmc_drv);
    if (req->stop) {
        /* send stop command */
        TRACE_DEBUG("%s send stop\n", __func__);
        ark_mmc_send_command(mmc_drv, req->stop);
    }
out:
    ark_mmc_complete_request(mmc_drv);
    TRACE_DEBUG("%s end\n", __func__);
}
static const struct mmcsd_host_ops ark_mmc_ops = {
    .request = ark_mmc_request,
    .set_iocfg = ark_mmc_set_iocfg,
    .enable_sdio_irq = ark_mmc_enable_sdio_irq,
    .get_card_status = ark_mmc_get_card_status,
};
static void ark_mmc_interrupt(void *param)
{
    struct mmc_driver *mmc_drv = (struct mmc_driver *)param;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = NULL;
    uint32_t status;

    if (cmd && cmd->data) {
        data = cmd->data;
    }

    status = MMC_GetUnmaskedInterrupt(mmc_obj);
    TRACE_DEBUG("unmasked interrupts: 0x%x\n", status);

    if (status & SDMMC_CMD_ERROR_FLAGS) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_CMD_ERROR_FLAGS);
        mmc_obj->result = -1;
        xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
    }
    if (status & SDMMC_DATA_ERROR_FLAGS) {
        /* if there is an error, report DATA_ERROR */
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_DATA_ERROR_FLAGS);
        mmc_obj->result = -1;
        xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
    }
    if (status & SDMMC_INT_DATA_OVER) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_DATA_OVER);
        if (data && data->flags & DATA_DIR_READ) {
            if (!mmc_obj->using_dma && data->bytes_xfered != data->blks * data->blksize)
                ark_mmc_read_pio(mmc_drv, 1);
        }
        if (!mmc_obj->using_dma)
            xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
    }
    if (status & SDMMC_INT_RXDR) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR);
        if (data && data->flags & DATA_DIR_READ)
            ark_mmc_read_pio(mmc_drv, 0);
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR);
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_HTO);
    }
    if (status & SDMMC_INT_TXDR) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_TXDR);
        if (data && data->flags & DATA_DIR_WRITE)
            ark_mmc_write_pio(mmc_drv);
    }
    if (status & SDMMC_INT_CMD_DONE) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE);
    }
    if (status & SDMMC_INT_CD) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CD);
        mmcsd_change_from_isr(mmc_drv->host);
    }
    if (status & SDMMC_INT_SDIO) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO);
        sdio_irq_wakeup_isr(mmc_drv->host);
    }

    if (mmc_obj->use_dma == TRANS_MODE_IDMAC) {
        /* Handle IDMAC interrupts */
        if (mmc_obj->dma_64bit_address == 1) {
            status = readl(mmc_obj->base + SDMMC_IDSTS64);
            if (MMC_IsFifoEmpty(mmc_obj) &&
                (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) {
                writel(SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI,
                       mmc_obj->base + SDMMC_IDSTS64);
                writel(SDMMC_IDMAC_INT_NI, mmc_obj->base + SDMMC_IDSTS64);
                // if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
                //     host->dma_ops->complete((void *)host);
            }
        } else {
            status = readl(mmc_obj->base + SDMMC_IDSTS);
            if (MMC_IsFifoEmpty(mmc_obj) &&
                (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) {
                writel(SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI,
                       mmc_obj->base + SDMMC_IDSTS);
                writel(SDMMC_IDMAC_INT_NI, mmc_obj->base + SDMMC_IDSTS);
                xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
                // if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
                //     mmc_obj->dma_ops->complete((void *)host);
            }
        }
    }
}
void ark_mmc_reset(struct ark_mmc_obj *mmc_obj)
{
    sys_soft_reset(mmc_obj->softreset_id);
}

static struct ark_mmc_obj mmc0_obj = {
    .id = 0,
    .irq = SDMMC0_IRQn,
    .base = REGS_SDMMC0_BASE,
    .mmc_reset = ark_mmc_reset,
    .softreset_id = softreset_sdmmc,
    .clk_id = CLK_SDMMC0,
};

static struct ark_mmc_obj mmc1_obj = {
    .id = 1,
    .irq = SDMMC1_IRQn,
    .base = REGS_SDMMC1_BASE,
    .mmc_reset = ark_mmc_reset,
    .softreset_id = softreset_sdmmc1,
    .clk_id = CLK_SDMMC1,
};
int ark_mmc_probe(struct ark_mmc_obj *mmc_obj)
{
    struct mmc_driver *mmc_drv;
    struct mmcsd_host *host;

    TRACE_DEBUG("%s start\n", __func__);
    mmc_drv = (struct mmc_driver *)pvPortMalloc(sizeof(struct mmc_driver));
    if (!mmc_drv) {
        printf("ERROR: %s, failed to malloc mmc_drv\n", __func__);
        return -ENOMEM;
    }
    memset(mmc_drv, 0, sizeof(struct mmc_driver));
    mmc_drv->priv = mmc_obj;

    host = mmcsd_alloc_host();
    if (!host) {
        printf("ERROR: %s, failed to malloc host\n", __func__);
        vPortFree(mmc_drv);
        return -ENOMEM;
    }

    host->ops = &ark_mmc_ops;
    host->freq_min = MMC_FEQ_MIN;
    host->freq_max = MMC_FEQ_MAX;
    host->valid_ocr = VDD_32_33 | VDD_33_34;
    host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_BUSWIDTH_4;
    host->max_blk_size = 512;
    //fixme: max_blk_count?
    host->max_blk_count = 2048;
    host->private_data = mmc_drv;
    mmc_drv->host = host;

    MMC_Init(mmc_obj);
    if (mmc_obj->use_dma == TRANS_MODE_IDMAC) {
        host->max_segs = DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
        host->max_blk_size = 65535;
        host->max_seg_size = 0x1000;
        host->max_req_size = host->max_seg_size * host->max_segs;
        host->max_blk_count = host->max_req_size / 512;
    }

    request_irq(mmc_obj->irq, 0, ark_mmc_interrupt, mmc_drv);

    if (mmcsd_dev_is_sdio_card(mmc_obj->id) == 1) {
        ark_mmc_enable_sdio_irq(host, 1);
    }

    if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) {
        if (ark_mmc_get_card_status(host))
            mmcsd_change(host);
    } else {
        mmcsd_change(host);
    }

    TRACE_DEBUG("%s end\n", __func__);
    return 0;
}
int mmc_init(void)
{
#ifdef SDMMC0_SUPPORT
    sema_take(SEMA_GATE_SDMMC0, portMAX_DELAY);
    ark_mmc_probe(&mmc0_obj);
    sema_give(SEMA_GATE_SDMMC0);
#endif
#ifdef SDMMC1_SUPPORT
    ark_mmc_probe(&mmc1_obj);
#endif
    return 0;
}
#endif /* SDMMC_SUPPORT */