sdmmc.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671
  1. #include "FreeRTOS.h"
  2. #include "chip.h"
  3. #include "board.h"
  4. #include "mmc.h"
  5. #include "sdio.h"
  6. #include "sdmmc.h"
  7. #include "mmcsd_core.h"
  8. #include "os_adapt.h"
  9. #ifdef SDMMC_SUPPORT
/* Maximum payload carried by one IDMAC descriptor (4 KiB; the des1
 * buffer-size field is 13 bits wide, see IDMAC_SET_BUFFER1_SIZE). */
#define DW_MCI_DESC_DATA_LENGTH 0x1000

/*
 * 32-bit internal-DMAC (IDMAC) descriptor, chained mode: des3 holds the
 * next descriptor's bus address rather than a second data buffer.
 * The layout is fixed by the DesignWare mobile storage host hardware.
 */
struct idmac_desc {
    __le32 des0; /* Control Descriptor */
#define IDMAC_DES0_DIC BIT(1)   /* disable interrupt on completion */
#define IDMAC_DES0_LD BIT(2)    /* last descriptor of the transfer */
#define IDMAC_DES0_FD BIT(3)    /* first descriptor of the transfer */
#define IDMAC_DES0_CH BIT(4)    /* des3 chains to the next descriptor */
#define IDMAC_DES0_ER BIT(5)    /* end of ring: engine wraps to base */
#define IDMAC_DES0_CES BIT(30)  /* card error summary (set by hardware) */
#define IDMAC_DES0_OWN BIT(31)  /* descriptor owned by the IDMAC */
    __le32 des1; /* Buffer sizes */
/* Store byte count 's' into the 13-bit buffer-1 size field of des1. */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
    ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
    __le32 des2; /* buffer 1 physical address */
    __le32 des3; /* buffer 2 physical address (next descriptor when CH is set) */
};
  26. static inline uint32_t MMC_GetWaterlevel(struct ark_mmc_obj *mmc_obj)
  27. {
  28. return (readl(mmc_obj->base + SDMMC_STATUS) >> 17) & 0x1fff;
  29. }
/* Raw SDMMC_STATUS register value. */
static inline uint32_t MMC_GetStatus(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_STATUS);
}
/* Raw (unmasked) interrupt status, SDMMC_RINTSTS. */
static inline uint32_t MMC_GetRawInterrupt(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_RINTSTS);
}
/* Masked interrupt status (raw status AND interrupt mask), SDMMC_MINTSTS. */
static inline uint32_t MMC_GetUnmaskedInterrupt(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_MINTSTS);
}
/* Acknowledge the given bits in the write-1-to-clear raw interrupt
 * status register.
 * NOTE(review): the return value merely propagates writel()'s result;
 * callers should treat this as void. */
static inline uint32_t MMC_ClearRawInterrupt(struct ark_mmc_obj *mmc_obj, uint32_t interrupts)
{
    return writel(interrupts, mmc_obj->base + SDMMC_RINTSTS);
}
/* Current interrupt mask, SDMMC_INTMASK. */
static inline uint32_t MMC_GetInterruptMask(struct ark_mmc_obj *mmc_obj)
{
    return readl(mmc_obj->base + SDMMC_INTMASK);
}
/* Replace the whole interrupt mask (1 = interrupt enabled). */
static inline uint32_t MMC_SetInterruptMask(struct ark_mmc_obj *mmc_obj, uint32_t mask)
{
    return writel(mask, mmc_obj->base + SDMMC_INTMASK);
}
/* Total byte count of the next data transfer, SDMMC_BYTCNT. */
static inline void MMC_SetByteCount(struct ark_mmc_obj *mmc_obj, uint32_t bytes)
{
    writel(bytes, mmc_obj->base + SDMMC_BYTCNT);
}
/* Block size in bytes for the next data transfer, SDMMC_BLKSIZ. */
static inline void MMC_SetBlockSize(struct ark_mmc_obj *mmc_obj, uint32_t size)
{
    writel(size, mmc_obj->base + SDMMC_BLKSIZ);
}
  62. static inline uint32_t MMC_GetResponse(struct ark_mmc_obj *mmc_obj, int resp_num)
  63. {
  64. return readl(mmc_obj->base + SDMMC_RESP0 + resp_num * 4);
  65. }
  66. static inline uint32_t MMC_IsFifoEmpty(struct ark_mmc_obj *mmc_obj)
  67. {
  68. return (readl(mmc_obj->base + SDMMC_STATUS) >> 2) & 0x1;
  69. }
  70. /*
  71. static inline uint32_t MMC_IsDataStateBusy(struct ark_mmc_obj *mmc_obj)
  72. {
  73. return (readl(mmc_obj->base + SDMMC_STATUS) >> 10) & 0x1;
  74. }
  75. */
  76. int MMC_UpdateClockRegister(struct ark_mmc_obj *mmc_obj, int div)
  77. {
  78. uint32_t tick, timeout;
  79. int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
  80. tick = xTaskGetTickCount();
  81. timeout = tick + configTICK_RATE_HZ / 10; //100ms in total
  82. /* disable clock */
  83. writel(0, mmc_obj->base + SDMMC_CLKENA);
  84. writel(0, mmc_obj->base + SDMMC_CLKSRC);
  85. /* inform CIU */
  86. writel(0, mmc_obj->base + SDMMC_CMDARG);
  87. wmb(); /* drain writebuffer */
  88. writel(1<<31 | 1<<21| 1<<13, mmc_obj->base + SDMMC_CMD);
  89. while(readl(mmc_obj->base + SDMMC_CMD) & 0x80000000)
  90. {
  91. if((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
  92. {
  93. printf("ERROR: %s, CARD out\n", __func__);
  94. return -1;
  95. }
  96. tick = xTaskGetTickCount();
  97. if(tick > timeout)
  98. {
  99. printf("ERROR: %s, update clock timeout\n", __func__);
  100. return -1;
  101. }
  102. }
  103. /* set clock to desired speed */
  104. writel(div, mmc_obj->base + SDMMC_CLKDIV);
  105. /* inform CIU */
  106. writel(0, mmc_obj->base + SDMMC_CMDARG);
  107. wmb(); /* drain writebuffer */
  108. writel(1<<31 | 1<<21| 1<<13, mmc_obj->base + SDMMC_CMD);
  109. while(readl(mmc_obj->base + SDMMC_CMD) & 0x80000000)
  110. {
  111. if((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
  112. {
  113. printf("ERROR: %s, CARD out\n", __func__);
  114. return -1;
  115. }
  116. tick = xTaskGetTickCount();
  117. if(tick > timeout)
  118. {
  119. TRACE_DEBUG("ERROR: %s, update clock timeout\n", __func__);
  120. return -1;
  121. }
  122. }
  123. /* enable clock */
  124. writel(1, mmc_obj->base + SDMMC_CLKENA);
  125. /* inform CIU */
  126. writel(1<<31 | 1<<21, mmc_obj->base + SDMMC_CMD);
  127. while(readl(mmc_obj->base + SDMMC_CMD) & 0x80000000)
  128. {
  129. if((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
  130. {
  131. printf("ERROR: %s, CARD out\n", __func__);
  132. return -1;
  133. }
  134. tick = xTaskGetTickCount();
  135. if(tick > timeout)
  136. {
  137. printf("ERROR: %s, update clock timeout\n", __func__);
  138. return -1;
  139. }
  140. }
  141. return 0;
  142. }
  143. int MMC_SetCardWidth(struct ark_mmc_obj *mmc_obj, int width)
  144. {
  145. switch(width)
  146. {
  147. case SDMMC_CTYPE_1BIT:
  148. writel(0, mmc_obj->base + SDMMC_CTYPE);
  149. break;
  150. case SDMMC_CTYPE_4BIT:
  151. writel(1, mmc_obj->base + SDMMC_CTYPE);
  152. break;
  153. default:
  154. printf("ERROR: %s, card width %d is not supported\n", __func__, width);
  155. return -1;
  156. break;
  157. }
  158. return 0;
  159. }
  160. int MMC_SendCommand(struct ark_mmc_obj *mmc_obj, uint32_t cmd, uint32_t arg, uint32_t flags)
  161. {
  162. uint32_t tick, timeout;
  163. tick = xTaskGetTickCount();
  164. timeout = tick + configTICK_RATE_HZ; //1s
  165. writel(arg, mmc_obj->base + SDMMC_CMDARG);
  166. flags |= 1<<31 | 1<<29 | cmd;
  167. writel(flags, mmc_obj->base + SDMMC_CMD);
  168. while(readl(mmc_obj->base + SDMMC_CMD) & SDMMC_CMD_START)
  169. {
  170. if((mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
  171. {
  172. printf("ERROR: %s,SDMMC_CMD BackWard Read , Card Out\n", __func__);
  173. return -1;
  174. }
  175. tick = xTaskGetTickCount();
  176. if(tick > timeout)
  177. {
  178. printf("ERROR: %s, send cmd timeout\n", __func__);
  179. return -1;
  180. }
  181. }
  182. //fixme: check HLE_INT_STATUS
  183. return 0;
  184. }
  185. int MMC_ResetFifo(struct ark_mmc_obj *mmc_obj)
  186. {
  187. uint32_t reg, tick, timeout;
  188. tick = xTaskGetTickCount();
  189. timeout = tick + configTICK_RATE_HZ / 10; //100ms
  190. reg = readl(mmc_obj->base + SDMMC_CTRL);
  191. reg |= 1 << 1;
  192. writel(reg, mmc_obj->base + SDMMC_CTRL);
  193. //wait until fifo reset finish
  194. while(readl(mmc_obj->base + SDMMC_CTRL) & SDMMC_CTRL_FIFO_RESET)
  195. {
  196. tick = xTaskGetTickCount();
  197. if(tick > timeout)
  198. {
  199. printf("ERROR: %s, FIFO reset timeout\n", __func__);
  200. return -1;
  201. }
  202. }
  203. return 0;
  204. }
  205. int MMC_Reset(struct ark_mmc_obj *mmc_obj)
  206. {
  207. uint32_t reg, tick, timeout;
  208. // tick = xTaskGetTickCount();
  209. // timeout = tick + configTICK_RATE_HZ / 10; //100ms
  210. reg = readl(mmc_obj->base + SDMMC_CTRL);
  211. reg |= SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET;
  212. writel(reg, mmc_obj->base + SDMMC_CTRL);
  213. tick = xTaskGetTickCount();
  214. timeout = tick + configTICK_RATE_HZ / 10; //100ms
  215. while(readl(mmc_obj->base + SDMMC_CTRL) & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET))
  216. {
  217. tick = xTaskGetTickCount();
  218. if(tick > timeout)
  219. {
  220. printf("ERROR: %s, CTRL dma|fifo|ctrl reset timeout\n", __func__);
  221. return -1;
  222. }
  223. }
  224. return 0;
  225. }
  226. #define DW_MCI_DMA_THRESHOLD 32
  227. /* DMA interface functions */
  228. static void dw_mci_stop_dma(struct ark_mmc_obj *mmc_obj)
  229. {
  230. if (mmc_obj->using_dma) {
  231. mmc_obj->dma_ops->stop(mmc_obj);
  232. mmc_obj->dma_ops->cleanup(mmc_obj);
  233. }
  234. /* Data transfer was stopped by the interrupt handler */
  235. //set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
  236. }
  237. static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj);
/*
 * IDMAC transfer-complete callback, runs in interrupt context.
 * Invokes the per-transfer cleanup hook and wakes the task blocked on
 * 'transfer_completion'.
 *
 * NOTE(review): the item pointer given to xQueueSendFromISR() is NULL,
 * which is only valid for a zero-item-size queue used as a signal, and
 * pxHigherPriorityTaskWoken is 0, so no context switch is requested
 * from the ISR — confirm both against how the queue is created.
 */
static void dw_mci_dmac_complete_callback(void *param, unsigned int mask)
{
    struct ark_mmc_obj *mmc_obj = param;
    //struct mmcsd_data *data = mmc_obj->data;

    dev_vdbg(mmc_obj->dev, "DMA complete\n");

    mmc_obj->dma_ops->cleanup(mmc_obj);
    xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);

    /*
     * If the card was removed, data will be NULL. No point in trying to
     * send the stop command or waiting for NBUSY in this case.
     */
    /* if (data) {
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
    } */
}
  254. static UINT32 WaiteforDmaResetClear(UINT32 addr, UINT32 __delay_us, UINT32 timeout_us)
  255. {
  256. u64 __timeout_us = (timeout_us);
  257. UINT32 tmp,val;
  258. while(__timeout_us){
  259. val = readl(addr);
  260. tmp = (val>>2)& 0x1 ;
  261. if(tmp == 0)
  262. return 0;
  263. udelay(__delay_us);
  264. }
  265. return 1;
  266. }
  267. static UINT32 WaiteforDmaOwnClear(u32 addr, UINT32 __delay_us, UINT32 timeout_us)
  268. {
  269. u64 __timeout_us = (timeout_us);
  270. UINT32 tmp,val;
  271. while(__timeout_us){
  272. val = readl(addr);
  273. tmp = (val>>31)& 0x1 ;
  274. if(tmp == 0)
  275. return 0;
  276. udelay(__delay_us);
  277. __timeout_us-- ;
  278. }
  279. return 1;
  280. }
  281. static bool dw_mci_ctrl_reset(struct ark_mmc_obj *mmc_obj, u32 reset)
  282. {
  283. u32 ctrl;
  284. ctrl = readl(mmc_obj->base + SDMMC_CTRL);
  285. ctrl |= reset;
  286. writel( ctrl, mmc_obj->base + SDMMC_CTRL);
  287. /* wait till resets clear */
  288. if (WaiteforDmaResetClear(mmc_obj->base + SDMMC_CTRL,1, 500 * USEC_PER_MSEC)) {
  289. printf("Timeout resetting block (ctrl reset %#x)\n",
  290. readl(mmc_obj->base + SDMMC_CTRL));
  291. return false;
  292. }
  293. return true;
  294. }
  295. static void dw_mci_idmac_reset(struct ark_mmc_obj *mmc_obj)
  296. {
  297. u32 bmod = readl(mmc_obj->base + SDMMC_BMOD);
  298. /* Software reset of DMA */
  299. bmod |= SDMMC_IDMAC_SWRESET;
  300. writel(bmod, mmc_obj->base + SDMMC_BMOD);
  301. }
/*
 * Build the IDMAC descriptor ring inside the pre-allocated sg_cpu
 * buffer and program the controller with its bus address (sg_dma).
 *
 * Each descriptor is forward-chained (des3 = bus address of the next
 * one) and the final descriptor carries IDMAC_DES0_ER so the engine
 * wraps back to the ring base.  Interrupt generation is then limited
 * to normal/receive/transmit completion.  Always returns 0.
 */
static int dw_mci_idmac_init(struct ark_mmc_obj *mmc_obj)
{
    int i;
    unsigned int ring_size = 0;

    if (mmc_obj->dma_64bit_address == 1) {
#if 0   /* 64-bit descriptor ring: disabled, SoC uses 32-bit IDMAC */
        struct idmac_desc_64addr *p;
        /* Number of descriptors in the ring buffer */
        ring_size =
            DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
        /* Forward link the descriptor list */
        for (i = 0, p = mmc_obj->sg_cpu; i < ring_size - 1;
             i++, p++) {
            p->des6 = (mmc_obj->sg_dma +
                (sizeof(struct idmac_desc_64addr) *
                (i + 1))) & 0xffffffff;
            p->des7 = (u64)(mmc_obj->sg_dma +
                (sizeof(struct idmac_desc_64addr) *
                (i + 1))) >> 32;
            /* Initialize reserved and buffer size fields to "0" */
            p->des0 = 0;
            p->des1 = 0;
            p->des2 = 0;
            p->des3 = 0;
        }
        /* Set the last descriptor as the end-of-ring descriptor */
        p->des6 = mmc_obj->sg_dma & 0xffffffff;
        p->des7 = (u64)mmc_obj->sg_dma >> 32;
        p->des0 = IDMAC_DES0_ER;
#endif
    } else {
        struct idmac_desc *p;
        /* Number of descriptors in the ring buffer */
        ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
        /* Forward link the descriptor list */
        for (i = 0, p = mmc_obj->sg_cpu;
             i < ring_size - 1;
             i++, p++) {
            p->des3 = cpu_to_le32(mmc_obj->sg_dma +
                    (sizeof(struct idmac_desc) * (i + 1)));
            p->des0 = 0;
            p->des1 = 0;
        }
        /* Set the last descriptor as the end-of-ring descriptor */
        p->des3 = cpu_to_le32(mmc_obj->sg_dma);
        p->des0 = cpu_to_le32(IDMAC_DES0_ER);
    }

    dw_mci_idmac_reset(mmc_obj);

    if (mmc_obj->dma_64bit_address == 1) {
        /* Mask out interrupts - get Tx & Rx complete only */
        writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS64);
        writel(SDMMC_IDMAC_INT_NI|SDMMC_IDMAC_INT_RI|SDMMC_IDMAC_INT_TI, mmc_obj->base + SDMMC_IDINTEN64);
        /* Set the descriptor base address */
        writel(mmc_obj->sg_dma & 0xffffffff, mmc_obj->base + SDMMC_DBADDRL);
        writel((u64)mmc_obj->sg_dma >> 32, mmc_obj->base + SDMMC_DBADDRU);
    } else {
        /* Mask out interrupts - get Tx & Rx complete only */
        writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS);
        writel(SDMMC_IDMAC_INT_NI|SDMMC_IDMAC_INT_RI|SDMMC_IDMAC_INT_TI, mmc_obj->base + SDMMC_IDINTEN);
        /* Set the descriptor base address */
        writel(mmc_obj->sg_dma, mmc_obj->base + SDMMC_DBADDR);
    }
    return 0;
}
/*
 * Translate one mmcsd_data request into a chain of 32-bit IDMAC
 * descriptors in the ring at mmc_obj->sg_cpu, each carrying at most
 * DW_MCI_DESC_DATA_LENGTH bytes.
 *
 * If the caller's buffer is not ARCH_DMA_MINALIGN-aligned, a bounce
 * ("dummy") buffer is allocated and used instead: for writes the data
 * is copied in here; for reads the completion path presumably copies
 * it back out — TODO confirm, and confirm the bounce buffers are freed
 * after the transfer (they are not freed on the error path below).
 *
 * Returns 0 on success, -ENOMEM if a bounce buffer cannot be
 * allocated, or -EINVAL if a descriptor's OWN bit never clears (the
 * ring is then wiped and rebuilt via dw_mci_idmac_init()).
 */
static inline int dw_mci_prepare_desc32(struct ark_mmc_obj *mmc_obj,
        struct mmcsd_data *data)
{
    unsigned int desc_len;
    struct idmac_desc *desc_first, *desc_last, *desc;
    u32 dma_address;
    int i;
    unsigned int sg_len;
    u32 mem_addr;
    unsigned int length;
    unsigned int leftsize;

    dma_address = (u32)data->buf;
    if (dma_address & (ARCH_DMA_MINALIGN - 1)) {
        /* Misaligned user buffer: fall back to a bounce buffer.
         * NOTE(review): assumes pvPortMalloc returns suitably aligned
         * memory — TODO confirm against the FreeRTOS heap config. */
        if (data->flags & DATA_DIR_WRITE) {
            mmc_obj->tx_dummy_buffer = pvPortMalloc(data->blks * data->blksize);
            if (!mmc_obj->tx_dummy_buffer)
                return -ENOMEM;
            memcpy(mmc_obj->tx_dummy_buffer, data->buf, data->blks * data->blksize);
            dma_address = (u32)mmc_obj->tx_dummy_buffer;
        } else if (data->flags & DATA_DIR_READ) {
            mmc_obj->rx_dummy_buffer = pvPortMalloc(data->blks * data->blksize);
            if (!mmc_obj->rx_dummy_buffer)
                return -ENOMEM;
            dma_address = (u32)mmc_obj->rx_dummy_buffer;
        }
        mmc_obj->dummy_buffer_used = 1;
    } else {
        mmc_obj->dummy_buffer_used = 0;
    }

    desc_first = desc_last = desc = mmc_obj->sg_cpu;
    leftsize = data->blks * data->blksize;
    /* Number of 4 KiB-or-less segments the request splits into. */
    if (data->blks * data->blksize < DW_MCI_DESC_DATA_LENGTH) {
        sg_len = 1;
        length = data->blks * data->blksize;
    } else {
        sg_len = (data->blks * data->blksize % DW_MCI_DESC_DATA_LENGTH) ?
            ((data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH) + 1) :
            data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH;
        length = DW_MCI_DESC_DATA_LENGTH;
    }
    /* Descriptors need the bus (physical) address of the data. */
    mem_addr = VIRT_TO_PHY(dma_address);

    for (i = 0; i < sg_len; i++) {
        if (sg_len > 1) {
            length = (leftsize >= DW_MCI_DESC_DATA_LENGTH) ?
                DW_MCI_DESC_DATA_LENGTH : leftsize;
            if (length == 0)
                break;
        }
        for ( ; length ; desc++) {
            desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
                length : DW_MCI_DESC_DATA_LENGTH;
            length -= desc_len;
            leftsize -= desc_len;
            /*
             * Wait for the former clear OWN bit operation
             * of IDMAC to make sure that this descriptor
             * isn't still owned by IDMAC as IDMAC's write
             * ops and CPU's read ops are asynchronous.
             */
            if (WaiteforDmaOwnClear((UINT32)(&desc->des0), 10, 10)) {
                printf(">>>>>IDMA OWN TIME OUT!!!\n");
                goto err_own_bit;
            }
            /*
             * Set the OWN bit and disable interrupts
             * for this descriptor
             */
            desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
                IDMAC_DES0_DIC |
                IDMAC_DES0_CH);
            /* Buffer length */
            IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
            /* Physical address to DMA to/from */
            desc->des2 = cpu_to_le32(mem_addr);
            /* Update physical address for the next desc */
            mem_addr += desc_len;
            /* Save pointer to the last descriptor */
            desc_last = desc;
        }
    }
    /* Set first descriptor */
    desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
    /* Set last descriptor: re-enable its completion interrupt and stop
     * chaining there. */
    desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
            IDMAC_DES0_DIC));
    desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
    mmc_obj->data = data;
    return 0;

err_own_bit:
    /* restore the descriptor chain as it's polluted */
    dev_dbg("descriptor is still owned by IDMAC.\n");
    memset(mmc_obj->sg_cpu, 0, DESC_RING_BUF_SZ);
    dw_mci_idmac_init(mmc_obj);
    return -EINVAL;
}
/*
 * Kick off an IDMAC transfer for 'data': build the descriptor chain,
 * reset and select the IDMAC, maintain cache coherency for the data
 * buffer, then ring the poll-demand doorbell.
 * Returns 0 on success or the error from descriptor preparation.
 */
static int dw_mci_idmac_start_dma(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
    u32 temp;
    int ret = 0;

#if 1
    if (mmc_obj->dma_64bit_address == 1)
        ;//ret = dw_mci_prepare_desc64(host, mmc_obj->data, sg_len);
    else
        ret = dw_mci_prepare_desc32(mmc_obj, data);
    if (ret)
        goto out;
#endif
    /* drain writebuffer */
    wmb();
    /* Make sure to reset DMA in case we did PIO before this */
    dw_mci_ctrl_reset(mmc_obj, SDMMC_CTRL_DMA_RESET);
    dw_mci_idmac_reset(mmc_obj);
    /* Select IDMAC interface */
    temp = readl(mmc_obj->base + SDMMC_CTRL);
    temp |= SDMMC_CTRL_USE_IDMAC;
    writel(temp, mmc_obj->base + SDMMC_CTRL);
    /* drain writebuffer */
    wmb();
    /* Enable the IDMAC */
    temp = readl(mmc_obj->base + SDMMC_BMOD);
    temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
    writel(temp, mmc_obj->base + SDMMC_BMOD);
#if 0
    /* Flush cache before write */
    if (data->flags & DATA_DIR_WRITE)
    {
        CP15_clean_dcache_for_dma((u32)data->buf,
            (u32)data->buf + data->blks * data->blksize);
    }
    /* Invalidate cache before read */
    else if (data->flags & DATA_DIR_READ)
    {
        CP15_flush_dcache_for_dma((u32)data->buf,
            (u32)data->buf + data->blks * data->blksize);
    }
#else
    /* Cache maintenance targets whichever buffer the descriptors
     * actually point at: the bounce buffer when one was substituted,
     * otherwise the caller's buffer. */
    if (mmc_obj->using_dma) {
        if (mmc_obj->dummy_buffer_used) {
            if (data->flags & DATA_DIR_WRITE) {
                if (mmc_obj->tx_dummy_buffer)
                    CP15_clean_dcache_for_dma((u32)mmc_obj->tx_dummy_buffer,
                        (u32)mmc_obj->tx_dummy_buffer + data->blks * data->blksize);
            } else if (data->flags & DATA_DIR_READ) {
                if (mmc_obj->rx_dummy_buffer)
                    CP15_flush_dcache_for_dma((u32)mmc_obj->rx_dummy_buffer,
                        (u32)mmc_obj->rx_dummy_buffer + data->blks * data->blksize);
            }
        } else {
            /* Flush cache before write */
            if (data->flags & DATA_DIR_WRITE)
            {
                CP15_clean_dcache_for_dma((u32)data->buf,
                    (u32)data->buf + data->blks * data->blksize);
            }
            /* Invalidate cache before read */
            else if (data->flags & DATA_DIR_READ)
            {
                CP15_flush_dcache_for_dma((u32)data->buf,
                    (u32)data->buf + data->blks * data->blksize);
            }
        }
    }
#endif
    /* Start it running */
    writel(1, mmc_obj->base + SDMMC_PLDMND);
out:
    return ret;
}
  543. static void dw_mci_idmac_stop_dma(struct ark_mmc_obj *mmc_obj)
  544. {
  545. u32 temp;
  546. /* Disable and reset the IDMAC interface */
  547. temp = readl(mmc_obj->base+SDMMC_CTRL);
  548. temp &= ~SDMMC_CTRL_USE_IDMAC;
  549. temp |= SDMMC_CTRL_DMA_RESET;
  550. writel(temp,mmc_obj->base+SDMMC_CTRL);
  551. /* Stop the IDMAC running */
  552. temp = readl(mmc_obj->base+SDMMC_BMOD);
  553. temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
  554. temp |= SDMMC_IDMAC_SWRESET;
  555. writel(temp,mmc_obj->base+SDMMC_BMOD);
  556. }
  557. void dw_mci_dmac_complete_dma(void *arg)
  558. {
  559. #if 0
  560. struct dw_mci *host = arg;
  561. struct mmc_data *data = host->data;
  562. dev_vdbg(host->dev, "DMA complete\n");
  563. if ((host->use_dma == TRANS_MODE_EDMAC) &&
  564. data && (data->flags & MMC_DATA_READ))
  565. /* Invalidate cache after read */
  566. dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
  567. data->sg,
  568. data->sg_len,
  569. DMA_FROM_DEVICE);
  570. host->dma_ops->cleanup(host);
  571. /*
  572. * If the card was removed, data will be NULL. No point in trying to
  573. * send the stop command or waiting for NBUSY in this case.
  574. */
  575. if (data) {
  576. set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
  577. tasklet_schedule(&host->tasklet);
  578. }
  579. #endif
  580. return;
  581. }
  582. static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj)
  583. {
  584. return;
  585. if(mmc_obj->use_dma == TRANS_MODE_IDMAC)
  586. {
  587. void *data = mmc_obj->sg_cpu;
  588. if (data) {
  589. free(data);
  590. }
  591. }
  592. }
/* Internal-DMAC operation table bound into mmc_obj->dma_ops by
 * dw_mci_init_dma().  '.complete' is intentionally left unset:
 * completion is signalled from the ISR callback instead. */
static struct dw_mci_dma_ops dw_mci_idmac_ops = {
    .init = dw_mci_idmac_init,
    .start = dw_mci_idmac_start_dma,
    .stop = dw_mci_idmac_stop_dma,
    // .complete = dw_mci_dmac_complete_dma,
    .cleanup = dw_mci_dma_cleanup,
};
/*
 * Probe the controller's DMA capability and, when an internal DMAC is
 * present, allocate the descriptor ring and install dw_mci_idmac_ops.
 * Any failure falls back to PIO mode (mmc_obj->use_dma =
 * TRANS_MODE_PIO).
 */
static void dw_mci_init_dma(struct ark_mmc_obj *mmc_obj)
{
    unsigned int addr_config = 0;
    /*
     * Check tansfer mode from HCON[17:16]
     * Clear the ambiguous description of dw_mmc databook:
     * 2b'00: No DMA Interface -> Actually means using Internal DMA block
     * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
     * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
     * 2b'11: Non DW DMA Interface -> pio only
     * Compared to DesignWare DMA Interface, Generic DMA Interface has a
     * simpler request/acknowledge handshake mechanism and both of them
     * are regarded as external dma master for dw_mmc.
     */
    mmc_obj->use_dma = SDMMC_GET_TRANS_MODE(readl(mmc_obj->base + SDMMC_HCON));
    //mmc_obj->use_dma = TRANS_MODE_IDMAC;
    if (mmc_obj->use_dma == DMA_INTERFACE_IDMA) {
        mmc_obj->use_dma = TRANS_MODE_IDMAC;
    } else {
        goto no_dma;
    }
    /* Determine which DMA interface to use */
    /*
     * Check ADDR_CONFIG bit in HCON to find
     * IDMAC address bus width
     */
    addr_config = SDMMC_GET_ADDR_CONFIG(readl(mmc_obj->base + SDMMC_HCON));
    if (addr_config == 1) {
        ;/* host supports IDMAC in 64-bit address mode */
        /* NOTE(review): dma_64bit_address is not set to 1 here — the
         * 64-bit path is effectively disabled; presumably the field is
         * zero-initialized elsewhere — TODO confirm. */
    } else {
        /* host supports IDMAC in 32-bit address mode */
        mmc_obj->dma_64bit_address = 0;
        dev_info(mmc_obj->dev, "IDMAC supports 32-bit address mode.\n");
    }
    /* Alloc memory for sg translation.
     * NOTE(review): ARCH_DMA_MINALIGN slack is allocated but the
     * pointer is never rounded up to that alignment — relies on
     * pvPortMalloc's native alignment; TODO confirm. */
    mmc_obj->sg_cpu = (void *)(dma_addr_t)pvPortMalloc(DESC_RING_BUF_SZ + ARCH_DMA_MINALIGN);
    if (!mmc_obj->sg_cpu) {
        printf("%s: could not alloc DMA memory\n", __func__);
        goto no_dma;
    }
    /* Keep both views of the ring: bus address for the controller,
     * uncached CPU alias for descriptor writes. */
    mmc_obj->sg_dma = VIRT_TO_PHY((u32)mmc_obj->sg_cpu);
    mmc_obj->sg_cpu = (u32 *)PHY_TO_UNCACHED_VIRT((u32)mmc_obj->sg_cpu);
    printf(">>>sg_dma 0x%x,sg_cpu = 0x%x>>>>\n", mmc_obj->sg_dma, mmc_obj->sg_cpu);
    if (!mmc_obj->sg_cpu) {
        dev_err(mmc_obj->dev, "%s: could not alloc DMA memory\n", __func__);
        goto no_dma;
    }
    mmc_obj->dma_ops = &dw_mci_idmac_ops;
    dev_info(mmc_obj->dev, "Using internal DMA controller.\n");
    if (mmc_obj->dma_ops->init && mmc_obj->dma_ops->start &&
        mmc_obj->dma_ops->stop && mmc_obj->dma_ops->cleanup) {
        /* NOTE(review): failure below leaks the ring allocated above
         * (one-shot init path, so the leak is bounded). */
        if (mmc_obj->dma_ops->init(mmc_obj)) {
            printf("%s: Unable to initialize DMA Controller.\n",
                __func__);
            goto no_dma;
        }
    } else {
        printf("DMA initialization not found.\n");
        goto no_dma;
    }
    return;

no_dma:
    dev_info(mmc_obj->dev, "Using PIO mode.\n");
    mmc_obj->use_dma = TRANS_MODE_PIO;
}
/*
 * One-time controller bring-up: optional board-level reset hook, clock
 * gate enable, core soft-reset, DMA probing, then interrupt and FIFO
 * watermark configuration.  On return only the card-detect interrupt
 * is unmasked.
 */
void MMC_Init(struct ark_mmc_obj *mmc_obj)
{
    uint32_t reg;

    if (mmc_obj->mmc_reset)
        mmc_obj->mmc_reset(mmc_obj);
    vClkEnable(mmc_obj->clk_id);
    MMC_Reset(mmc_obj);
    dw_mci_init_dma(mmc_obj);

    MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_ALL);
    MMC_SetInterruptMask(mmc_obj, 0x0);  /* mask everything first */
    reg = readl(mmc_obj->base + SDMMC_CTRL);
    reg |= SDMMC_CTRL_INT_ENABLE;        /* global interrupt enable */
    writel(reg, mmc_obj->base + SDMMC_CTRL);

    //set timeout param
    writel(0xffffffff, mmc_obj->base + SDMMC_TMOUT); /* max response/data timeout */

    //set fifo
    /* FIFOTH[27:16] holds FIFO depth - 1 at reset; recover the depth,
     * then program watermarks at half depth with burst size code 0x3. */
    reg = readl(mmc_obj->base + SDMMC_FIFOTH);
    reg = ((reg >> 16) & 0xfff) + 1;     /* reg now holds the FIFO depth */
    mmc_obj->fifoth_val = SDMMC_SET_FIFOTH(0x3, reg / 2 - 1, reg / 2);
    writel(mmc_obj->fifoth_val, mmc_obj->base + SDMMC_FIFOTH);

    MMC_SetInterruptMask(mmc_obj, SDMMC_INT_CD); /* card-detect only */
}
  687. static int ark_mmc_write_pio(struct mmc_driver *mmc_drv)
  688. {
  689. struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
  690. struct mmcsd_cmd *cmd = mmc_drv->cmd;
  691. struct mmcsd_data *data = NULL;
  692. uint32_t status;
  693. uint32_t len;
  694. uint32_t remain, fcnt;
  695. uint32_t *buf;
  696. int i;
  697. int hotpluge_support;
  698. if(cmd)
  699. data = cmd->data;
  700. if(!data)
  701. {
  702. TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__);
  703. return -EIO;
  704. }
  705. hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
  706. do {
  707. if((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
  708. break;
  709. if (data->blks * data->blksize == data->bytes_xfered)
  710. break;
  711. buf = data->buf + data->bytes_xfered / 4;
  712. remain = data->blks * data->blksize - data->bytes_xfered;
  713. do {
  714. fcnt = (SDMMC_FIFO_DEPTH - MMC_GetWaterlevel(mmc_obj)) * 4;
  715. len = configMIN(remain, fcnt);
  716. if (!len)
  717. break;
  718. for (i = 0; i < len / 4; i ++) {
  719. writel(*buf++, mmc_obj->base + SDMMC_FIFO);
  720. }
  721. data->bytes_xfered += len;
  722. remain -= len;
  723. } while (remain);
  724. status = readl(mmc_obj->base + SDMMC_MINTSTS);
  725. writel(SDMMC_INT_TXDR, mmc_obj->base + SDMMC_MINTSTS);
  726. } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
  727. return 0;
  728. }
/*
 * PIO read path: drain the controller FIFO into the request buffer
 * until the whole request is transferred, the card is removed, or the
 * RXDR (receive data request) interrupt stops being raised.
 *
 * @dto: when true (data-transfer-over), keep draining while the FIFO
 *       still reports data, so the tail of the transfer is not lost.
 * Returns 0, or -EIO when the command carries no data.
 */
static int ark_mmc_read_pio(struct mmc_driver *mmc_drv, bool dto)
{
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = NULL;
    u32 status;
    unsigned int len;
    unsigned int remain, fcnt;
    uint32_t *buf;
    int i;
    int hotpluge_support;

    if (cmd)
        data = cmd->data;
    if (!data) {
        TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__);
        return -EIO;
    }

    hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
    do {
        /* stop at once if the card has been pulled */
        if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1))
            break;
        if (data->blks * data->blksize == data->bytes_xfered)
            break; /* whole request transferred */

        buf = data->buf + data->bytes_xfered / 4;
        remain = data->blks * data->blksize - data->bytes_xfered;
        do {
            /* bytes currently available in the FIFO */
            fcnt = MMC_GetWaterlevel(mmc_obj) * 4;
            len = configMIN(remain, fcnt);
            if (!len)
                break;
            for (i = 0; i < len / 4; i++) {
                *buf++ = readl(mmc_obj->base + SDMMC_FIFO);
            }
            data->bytes_xfered += len;
            remain -= len;
        } while (remain);

        status = readl(mmc_obj->base + SDMMC_MINTSTS);
        /* ack RXDR (write-1-to-clear) */
        writel(SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS);
        /* if the RXDR is ready read again */
    } while ((status & SDMMC_INT_RXDR) ||
             (dto && MMC_GetWaterlevel(mmc_obj)));

    return 0;
}
  773. static void ark_mmc_set_iocfg(struct mmcsd_host *host, struct mmcsd_io_cfg *io_cfg)
  774. {
  775. uint32_t clksrc, clkdiv;
  776. struct mmc_driver *mmc_drv = host->private_data;
  777. struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
  778. unsigned int regs;
  779. int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);
  780. /* maybe switch power to the card */
  781. switch (io_cfg->power_mode)
  782. {
  783. case MMCSD_POWER_OFF:
  784. if(hotpluge_support == 1)
  785. {
  786. regs = readl(mmc_obj->base + SDMMC_PWREN);
  787. regs &= ~(1 << 0);
  788. writel(regs, mmc_obj->base + SDMMC_PWREN);
  789. }
  790. break;
  791. case MMCSD_POWER_UP:
  792. if(hotpluge_support == 1)
  793. {
  794. regs = readl(mmc_obj->base + SDMMC_PWREN);
  795. regs &= ~(1 << 0);
  796. regs |= (1 << 0);
  797. writel(regs, mmc_obj->base + SDMMC_PWREN);
  798. }
  799. break;
  800. case MMCSD_POWER_ON:
  801. if(hotpluge_support == 1)
  802. {
  803. MMC_Reset(mmc_obj);
  804. }
  805. break;
  806. default:
  807. printf("ERROR: %s, unknown power_mode %d\n", __func__, io_cfg->power_mode);
  808. break;
  809. }
  810. //fixme: read from PMU
  811. //why io_cfg->clock == 0 ?
  812. if(io_cfg->clock)
  813. {
  814. clksrc = ulClkGetRate(mmc_obj->clk_id);
  815. clkdiv = clksrc / io_cfg->clock / 2;
  816. MMC_UpdateClockRegister(mmc_obj, clkdiv);
  817. TRACE_DEBUG("io_cfg->clock: %lu, clock in: %lu, clkdiv: %d\n", io_cfg->clock, clkdiv, clkdiv);
  818. }
  819. else
  820. {
  821. writel(0, mmc_obj->base + SDMMC_CLKENA);
  822. }
  823. if (io_cfg->bus_width == MMCSD_BUS_WIDTH_4)
  824. {
  825. MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_4BIT);
  826. TRACE_DEBUG("set to 4-bit mode\n");
  827. }
  828. else
  829. {
  830. MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_1BIT);
  831. // printf("set to 1-bit mode\n");
  832. }
  833. TRACE_DEBUG("%s end\n", __func__);
  834. }
  835. static void ark_mmc_enable_sdio_irq(struct mmcsd_host *host, int32_t enable)
  836. {
  837. struct mmc_driver *mmc_drv = host->private_data;
  838. struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
  839. uint32_t reg;
  840. TRACE_DEBUG("%s start\n", __func__);
  841. if (enable)
  842. {
  843. MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO);
  844. reg = MMC_GetInterruptMask(mmc_obj);
  845. reg |= SDMMC_INT_SDIO;
  846. MMC_SetInterruptMask(mmc_obj, reg);
  847. }
  848. else
  849. {
  850. reg = MMC_GetInterruptMask(mmc_obj);
  851. reg &= ~SDMMC_INT_SDIO;
  852. MMC_SetInterruptMask(mmc_obj, reg);
  853. }
  854. }
  855. static int32_t ark_mmc_get_card_status(struct mmcsd_host *host)
  856. {
  857. struct mmc_driver *mmc_drv = host->private_data;
  858. struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
  859. return !(readl(mmc_obj->base + SDMMC_CDETECT) & 0x1);
  860. }
  861. static void ark_mmc_send_command(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd)
  862. {
  863. struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
  864. struct mmcsd_req *req = mmc_drv->req;
  865. //fixme: cmd->data or req->data
  866. struct mmcsd_data *data = cmd->data;
  867. int ret;
  868. uint32_t cmd_flags = 0;
  869. TRACE_DEBUG("%s, start\n", __func__);
  870. if (!cmd)
  871. {
  872. //fixme: stop dma
  873. printf("ERROR: %s, cmd is NULL\n", __func__);
  874. return;
  875. }
  876. if (data)
  877. {
  878. cmd_flags |= SDMMC_CMD_DAT_EXP;
  879. /* always set data start - also set direction flag for read */
  880. if (data->flags & DATA_DIR_WRITE)
  881. cmd_flags |= SDMMC_CMD_DAT_WR;
  882. if (data->flags & DATA_STREAM)
  883. cmd_flags |= SDMMC_CMD_STRM_MODE;
  884. }
  885. if (cmd == req->stop)
  886. cmd_flags |= SDMMC_CMD_STOP;
  887. else
  888. cmd_flags |= SDMMC_CMD_PRV_DAT_WAIT;
  889. switch (resp_type(cmd))
  890. {
  891. case RESP_NONE:
  892. break;
  893. case RESP_R1:
  894. case RESP_R5:
  895. case RESP_R6:
  896. case RESP_R7:
  897. case RESP_R1B:
  898. cmd_flags |= SDMMC_CMD_RESP_EXP;
  899. cmd_flags |= SDMMC_CMD_RESP_CRC;
  900. break;
  901. case RESP_R2:
  902. cmd_flags |= SDMMC_CMD_RESP_EXP;
  903. cmd_flags |= SDMMC_CMD_RESP_CRC;
  904. cmd_flags |= SDMMC_CMD_RESP_LONG;
  905. break;
  906. case RESP_R3:
  907. case RESP_R4:
  908. cmd_flags |= SDMMC_CMD_RESP_EXP;
  909. break;
  910. default:
  911. printf("ERROR: %s, unknown cmd type %x\n", __func__, resp_type(cmd));
  912. return;
  913. }
  914. if (cmd->cmd_code == GO_IDLE_STATE)
  915. cmd_flags |= SDMMC_CMD_INIT;
  916. /* CMD 11 check switch voltage */
  917. if (cmd->cmd_code == READ_DAT_UNTIL_STOP)
  918. cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
  919. TRACE_DEBUG("cmd code: %d, args: 0x%x, resp type: 0x%x, flag: 0x%x\n", cmd->cmd_code, cmd->arg, resp_type(cmd), cmd_flags);
  920. ret = MMC_SendCommand(mmc_obj, cmd->cmd_code, cmd->arg, cmd_flags);
  921. if(ret)
  922. {
  923. printf("ERROR: %s, Send command timeout, cmd: %d, status: 0x%x\n", __func__, cmd->cmd_code, MMC_GetStatus(mmc_obj));
  924. }
  925. }
/*
 * Choose DMA burst size (MSIZE) and RX/TX FIFO watermarks for the current
 * block size, and program SDMMC_FIFOTH. Picks the largest burst size that
 * divides both the block depth and the TX watermark complement; falls back
 * to MSIZE=1, rx_wmark=1 when the block size is not FIFO-width aligned.
 */
static void dw_mci_adjust_fifoth(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
    unsigned int blksz = data->blksize;
    const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
    u32 fifo_width = 4;
    u32 blksz_depth = blksz / fifo_width, fifoth_val;
    u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
    int idx = ARRAY_SIZE(mszs) - 1;
    /* pio should skip this scenario */
    if (!mmc_obj->use_dma)
        return;
    tx_wmark = SDMMC_FIFO_DEPTH / 2;
    tx_wmark_invers = SDMMC_FIFO_DEPTH - tx_wmark;
    /*
     * MSIZE is '1',
     * if blksz is not a multiple of the FIFO width
     */
    if (blksz % fifo_width)
        goto done;
    /* Try burst sizes from largest to smallest; mszs[idx] bursts must evenly
     * cover both the block depth and the remaining TX FIFO space. */
    do {
        if (!((blksz_depth % mszs[idx]) ||
              (tx_wmark_invers % mszs[idx]))) {
            msize = idx;
            rx_wmark = mszs[idx] - 1;
            break;
        }
    } while (--idx > 0);
    /*
     * If idx is '0', it won't be tried
     * Thus, initial values are used
     */
done:
    fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
    writel(fifoth_val, mmc_obj->base + SDMMC_FIFOTH);
}
/*
 * Try to set up the transfer for DMA. Returns 0 when DMA was started,
 * -1 when the caller must fall back to PIO (no DMA support, transfer too
 * small, misaligned buffer/block size, or the DMA engine failed to start).
 * Sets mmc_obj->using_dma accordingly.
 */
static int dw_mci_submit_data_dma(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data)
{
    u32 temp;
    mmc_obj->using_dma = 0;
    /* If we don't have a channel, we can't do DMA */
    if (!mmc_obj->use_dma)
        return -1;
    /* Reject transfers below the DMA threshold or not 4-byte aligned in
     * either block size or buffer address. */
    if (data->blks * data->blksize < DW_MCI_DMA_THRESHOLD ||
        data->blksize & 3 || (u32)data->buf & 3) {
        // //printf(">>>>Error,blksize 0x%x,dataaddr:0x%x\n",data->blksize,(u32)data->buf);
        mmc_obj->dma_ops->stop(mmc_obj);
        return -1;
    }
    mmc_obj->using_dma = 1;
    /* DMA mode: take data-completion/error interrupts, not the PIO FIFO ones. */
    temp = MMC_GetInterruptMask(mmc_obj);
    temp |= SDMMC_INT_DATA_OVER | SDMMC_INT_DATA_ERROR;
    temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
    MMC_SetInterruptMask(mmc_obj, temp);
    /* Enable the DMA interface */
    temp = readl(mmc_obj->base + SDMMC_CTRL);
    temp |= SDMMC_CTRL_DMA_ENABLE;
    writel(temp, mmc_obj->base + SDMMC_CTRL);
    /*
     * Decide the MSIZE and RX/TX Watermark.
     * If current block size is same with previous size,
     * no need to update fifoth.
     */
    if (mmc_obj->prev_blksz != data->blksize)
        dw_mci_adjust_fifoth(mmc_obj, data);
    if (mmc_obj->dma_ops->start(mmc_obj, data)) {
        mmc_obj->dma_ops->stop(mmc_obj);
        /* We can't do DMA, try PIO for this one */
        dev_dbg(mmc_obj->dev,
                "%s: fall back to PIO mode for current transfer\n",
                __func__);
        mmc_obj->using_dma = 0;
        return -1;
    }
    return 0;
}
/*
 * Prepare the controller for the data phase of the current command:
 * program block size/byte count, reset the FIFO, and attempt DMA setup.
 * If DMA cannot be used, falls back to PIO by unmasking the FIFO data-
 * request interrupts and disabling the DMA interface.
 * (Name "perpare" is a historic misspelling kept for interface stability.)
 */
static void ark_mmc_perpare_data(struct mmc_driver *mmc_drv)
{
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = cmd->data;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    uint32_t data_size;
    uint32_t reg;
    if(!data)
    {
        /* Non-data command: clear transfer geometry. */
        MMC_SetBlockSize(mmc_obj, 0);
        MMC_SetByteCount(mmc_obj, 0);
        return;
    }
    TRACE_DEBUG("%s, start\n", __func__);
    if(MMC_ResetFifo(mmc_obj))
    {
        /* FIFO reset timed out; abandon data preparation. */
        return;
    }
    data_size = data->blks * data->blksize;
    MMC_SetBlockSize(mmc_obj, data->blksize);
    data->bytes_xfered = 0;
    if(data_size % 4)
    {
        /* PIO/DMA paths move whole 32-bit words; odd sizes are unsupported. */
        printf("ERROR: data_size should be a multiple of 4, but now is %d\n", data_size);
    }
    MMC_SetByteCount(mmc_obj, data_size);
    TRACE_DEBUG("%s, set blk size: 0x%x, byte count: 0x%x\n", __func__, data->blksize, data_size);
    if (dw_mci_submit_data_dma(mmc_obj, data)) {
        /* DMA unavailable: switch to PIO. Clear stale FIFO requests, unmask
         * TXDR/RXDR, and turn the DMA interface off. */
        writel(SDMMC_INT_TXDR | SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS);
        reg = readl(mmc_obj->base + SDMMC_INTMASK);
        reg |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
        writel(reg, mmc_obj->base + SDMMC_INTMASK);
        reg = readl(mmc_obj->base + SDMMC_CTRL);
        reg &= ~SDMMC_CTRL_DMA_ENABLE;
        writel(reg, mmc_obj->base + SDMMC_CTRL);
    } else {
        /* Remember the block size so FIFOTH is only re-tuned when it changes. */
        mmc_obj->prev_blksz = data->blksize;
    }
    TRACE_DEBUG("%s, end\n", __func__);
}
  1041. int ark_mmc_wait_card_idle(struct ark_mmc_obj *mmc_obj)
  1042. {
  1043. uint32_t tick, timeout;
  1044. tick = xTaskGetTickCount();
  1045. timeout = tick + configTICK_RATE_HZ / 2; //500ms
  1046. while(MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY)
  1047. {
  1048. tick = xTaskGetTickCount();
  1049. if(tick > timeout)
  1050. {
  1051. return -1;
  1052. }
  1053. }
  1054. return 0;
  1055. }
/*
 * Poll for command completion and collect the response registers.
 * Returns 0 on success, -1 on completion timeout (500 ms), response
 * timeout (RTO), response CRC error, or response error.
 */
static int ark_mmc_get_response(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd)
{
    int i;
    uint32_t tick, timeout, status;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    cmd->resp[0] = 0;
    cmd->resp[1] = 0;
    cmd->resp[2] = 0;
    cmd->resp[3] = 0;
    tick = xTaskGetTickCount();
    timeout = tick + configTICK_RATE_HZ / 2; //500ms
    /* NOTE(review): absolute-tick comparison below misfires if the tick
     * counter wraps across the window — consider wrap-safe elapsed form. */
    //fixme: spin_lock_irqsave?
    do
    {
        status = MMC_GetRawInterrupt(mmc_obj);
        tick = xTaskGetTickCount();
        if(tick > timeout)
        {
            TRACE_DEBUG("ERROR: %s, get response timeout(cmd is not received by card), RINTSTS: 0x%x, cmd: %d\n", __func__, status, cmd->cmd_code);
            return -1;
        }
    }
    while(!(status & SDMMC_INT_CMD_DONE));
    MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE);
    /* Long (R2) responses are delivered most-significant word first, so the
     * register order is reversed into resp[0..3]. */
    for (i = 0; i < 4; i++)
    {
        if (resp_type(cmd) == RESP_R2)
        {
            cmd->resp[i] = MMC_GetResponse(mmc_obj, 3 - i);
            //fixme : R2 must delay some time here ,when use UHI card, need check why
            //1ms
            //vTaskDelay(configTICK_RATE_HZ / 100);
        }
        else
        {
            cmd->resp[i] = MMC_GetResponse(mmc_obj, i);
        }
    }
    TRACE_DEBUG("resp: 0x%x, 0x%x, 0x%x, 0x%x\n", cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
    /* Error bits are checked after the response is captured. */
    if (status & SDMMC_INT_RTO)
    {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RTO);
        TRACE_DEBUG("ERROR: %s, get response timeout, RINTSTS: 0x%x\n", __func__, status);
        return -1;
    }
    else if (status & (SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR))
    {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR);
        printf("ERROR: %s, response error or response crc error, RINTSTS: 0x%x\n", __func__, status);
        return -1;
    }
    return 0;
}
/*
 * Wait for the data phase of the current command to complete.
 * For PIO transfers the FIFO data interrupts are unmasked for the duration;
 * for DMA transfers any bounce ("dummy") buffers are released (read data is
 * copied out first). Completion is signalled from the ISR through the
 * transfer_completion queue. Returns 0 on success, -1 on timeout/error.
 */
static int ark_mmc_start_transfer(struct mmc_driver *mmc_drv)
{
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = NULL;
    int ret;
    uint32_t interrupt, status, reg;
    uint32_t timeout;
    if(cmd)
        data = cmd->data;
    if(!data)
    {
        /* No data phase: nothing to wait for. */
        return 0;
    }
    TRACE_DEBUG("%s, start\n", __func__);
    //fixme: spin_lock_irqsave(&host->lock, flags);
    if (!mmc_obj->using_dma) {
        //fifo mode open data interrupts
        reg = MMC_GetInterruptMask(mmc_obj);
        reg |= SDMMC_INT_STATUS_DATA;
        MMC_SetInterruptMask(mmc_obj, reg);
    }
    //fixme: spin_unlock_irqrestore(&host->lock, flags);
    /* Scale the wait with transfer size, assuming at least ~10 KB/s. */
    timeout = configTICK_RATE_HZ + pdMS_TO_TICKS(data->blks * data->blksize * 100/ 1024); // Minimum 10KB per second
    /* Queue has zero-sized items, so a NULL receive buffer is valid. */
    ret = xQueueReceive(mmc_obj->transfer_completion, NULL, timeout);
    if (mmc_obj->using_dma) {
        if (mmc_obj->dummy_buffer_used) {
            if (data->flags & DATA_DIR_WRITE) {
                if (mmc_obj->tx_dummy_buffer) {
                    vPortFree(mmc_obj->tx_dummy_buffer);
                    mmc_obj->tx_dummy_buffer = NULL;
                }
            } else if (data->flags & DATA_DIR_READ) {
                if (mmc_obj->rx_dummy_buffer) {
                    /* Copy the DMA bounce buffer back to the caller's buffer. */
                    memcpy(data->buf, mmc_obj->rx_dummy_buffer, data->blks * data->blksize);
                    vPortFree(mmc_obj->rx_dummy_buffer);
                    mmc_obj->rx_dummy_buffer = NULL;
                }
            }
        }
    } else {
        /* PIO done: re-mask the FIFO data interrupts. */
        reg = MMC_GetInterruptMask(mmc_obj);
        reg &= ~SDMMC_INT_STATUS_DATA;
        MMC_SetInterruptMask(mmc_obj, reg);
    }
    /* Failure if the wait timed out (ret != pdTRUE) or the ISR recorded an
     * error in mmc_obj->result. */
    if(ret != pdTRUE || mmc_obj->result)
    {
        //fixme: error handle
        if (mmc_obj->using_dma)
            dw_mci_stop_dma(mmc_obj);
        cmd->err = ret;
        interrupt = MMC_GetRawInterrupt(mmc_obj);
        status = MMC_GetStatus(mmc_obj);
        printf("ERROR: %s, transfer timeout, ret: %d, RINTSTS: 0x%x, STATUS: 0x%x\n", __func__, ret, interrupt, status);
        return -1;
    }
    data->bytes_xfered = data->blks * data->blksize;
    return 0;
}
  1168. static void ark_mmc_complete_request(struct mmc_driver *mmc_drv)
  1169. {
  1170. struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
  1171. mmc_drv->cmd = NULL;
  1172. mmc_drv->req = NULL;
  1173. mmc_drv->data = NULL;
  1174. MMC_SetBlockSize(mmc_obj, 0);
  1175. MMC_SetByteCount(mmc_obj, 0);
  1176. mmcsd_req_complete(mmc_drv->host);
  1177. }
/*
 * mmcsd_host_ops.request: execute one request end-to-end.
 * Sequence: wait for the controller to go idle, prepare the data phase,
 * issue the command, collect the response, wait for the data transfer, and
 * optionally issue the stop command. Always completes the request (even on
 * error) via ark_mmc_complete_request().
 */
static void ark_mmc_request(struct mmcsd_host *host, struct mmcsd_req *req)
{
    int ret;
    struct mmc_driver *mmc_drv = host->private_data;
    struct mmcsd_cmd *cmd = req->cmd;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    TRACE_DEBUG("%s start\n", __func__);
    mmc_drv->req = req;
    mmc_drv->cmd = cmd;
    /* Lazily create the zero-item-size completion queue; reuse it afterwards. */
    if (mmc_obj->transfer_completion == NULL)
        mmc_obj->transfer_completion = xQueueCreate(1, 0);
    else
        xQueueReset(mmc_obj->transfer_completion);
    ret = ark_mmc_wait_card_idle(mmc_obj);
    if (ret)
    {
        printf("ERROR: %s, data transfer timeout, status: 0x%x\r\n", __func__, MMC_GetStatus(mmc_obj));
        /* Only abort if the controller is still busy on a re-check. */
        if (MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY)
            goto out;
    }
    mmc_obj->result = 0;
    ark_mmc_perpare_data(mmc_drv);
    ark_mmc_send_command(mmc_drv, cmd);
    /* NOTE(review): 1 ms settle delay applied only on hot-pluggable slots —
     * reason not evident from this file; confirm before removing. */
    if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1)
        vTaskDelay(pdMS_TO_TICKS(1));
    ret = ark_mmc_get_response(mmc_drv, cmd);
    if(ret)
    {
        cmd->err = ret;
        printf("%s,get response returns %d, cmd: %d\r\n", __func__, ret, cmd->cmd_code);
        goto out;
    }
    ark_mmc_start_transfer(mmc_drv);
    if(req->stop)
    {
        /* send stop command */
        TRACE_DEBUG("%s send stop\n", __func__);
        ark_mmc_send_command(mmc_drv, req->stop);
    }
out:
    ark_mmc_complete_request(mmc_drv);
    TRACE_DEBUG("%s end\n", __func__);
}
/* Host operations exported to the mmcsd core for this controller. */
static const struct mmcsd_host_ops ark_mmc_ops =
{
    .request = ark_mmc_request,
    .set_iocfg = ark_mmc_set_iocfg,
    .enable_sdio_irq = ark_mmc_enable_sdio_irq,
    .get_card_status = ark_mmc_get_card_status,
};
/*
 * Controller interrupt handler. Dispatches on the unmasked interrupt
 * status: command/data errors abort the transfer, DTO/RXDR/TXDR drive the
 * PIO paths, CD triggers a rescan, SDIO wakes the SDIO IRQ handler, and —
 * when internal DMA is in use — the IDMAC status registers signal DMA
 * completion. Completion is reported through the transfer_completion queue.
 */
static void ark_mmc_interrupt(void *param)
{
    struct mmc_driver *mmc_drv = (struct mmc_driver *)param;
    struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv;
    struct mmcsd_cmd *cmd = mmc_drv->cmd;
    struct mmcsd_data *data = NULL;
    uint32_t status;
    if (cmd && cmd->data)
    {
        data = cmd->data;
    }
    status = MMC_GetUnmaskedInterrupt(mmc_obj);
    TRACE_DEBUG("unmasked interrupts: 0x%x\n", status);
    if (status & SDMMC_CMD_ERROR_FLAGS) {
        /* Command-phase error: record failure and wake the waiter. */
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_CMD_ERROR_FLAGS);
        mmc_obj->result = -1;
        xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
    }
    if (status & SDMMC_DATA_ERROR_FLAGS) {
        /* if there is an error report DATA_ERROR */
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_DATA_ERROR_FLAGS);
        mmc_obj->result = -1;
        xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
    }
    if (status & SDMMC_INT_DATA_OVER) {
        /* Data transfer over: in PIO read mode, drain whatever is left in
         * the FIFO before signalling completion. */
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_DATA_OVER);
        if (data && data->flags & DATA_DIR_READ) {
            if (!mmc_obj->using_dma && data->bytes_xfered != data->blks * data->blksize)
                ark_mmc_read_pio(mmc_drv, 1);
        }
        /* DMA completion is signalled from the IDMAC branch below instead. */
        if (!mmc_obj->using_dma)
            xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
    }
    if (status & SDMMC_INT_RXDR) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR);
        if (data && data->flags & DATA_DIR_READ)
            ark_mmc_read_pio(mmc_drv, 0);
        /* Clear RXDR raised while draining, plus any host timeout. */
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR);
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_HTO);
    }
    if (status & SDMMC_INT_TXDR) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_TXDR);
        if (data && data->flags & DATA_DIR_WRITE)
            ark_mmc_write_pio(mmc_drv);
    }
    if (status & SDMMC_INT_CMD_DONE) {
        /* Command done is polled in ark_mmc_get_response(); just clear it. */
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE);
    }
    if (status & SDMMC_INT_CD) {
        /* Card insert/remove: let the mmcsd core rescan. */
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CD);
        mmcsd_change_from_isr(mmc_drv->host);
    }
    if (status & SDMMC_INT_SDIO) {
        MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO);
        sdio_irq_wakeup_isr(mmc_drv->host);
    }
    if (mmc_obj->use_dma == TRANS_MODE_IDMAC){
        /* Handle IDMA interrupts */
        if (mmc_obj->dma_64bit_address == 1) {
            status = readl(mmc_obj->base + SDMMC_IDSTS64);
            if (MMC_IsFifoEmpty(mmc_obj) && (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) {
                writel(SDMMC_IDMAC_INT_TI|SDMMC_IDMAC_INT_RI, mmc_obj->base + SDMMC_IDSTS64);
                writel(SDMMC_IDMAC_INT_NI,mmc_obj->base + SDMMC_IDSTS64);
                /* NOTE(review): 64-bit path never signals transfer_completion,
                 * unlike the 32-bit path below — confirm whether intentional. */
                // if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
                // host->dma_ops->complete((void *)host);
            }
        } else {
            status = readl(mmc_obj->base+SDMMC_IDSTS);
            if (MMC_IsFifoEmpty(mmc_obj) && (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))){
                writel(SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI,mmc_obj->base+SDMMC_IDSTS);
                writel(SDMMC_IDMAC_INT_NI,mmc_obj->base+SDMMC_IDSTS);
                xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
                // if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
                // mmc_obj->dma_ops->complete((void *)host);
            }
        }
    }
    // xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0);
}
/* Hard-reset this controller instance via the SoC soft-reset line. */
void ark_mmc_reset(struct ark_mmc_obj *mmc_obj)
{
    sys_soft_reset(mmc_obj->softreset_id);
}
/* Static descriptor for controller instance 0 (SDMMC0). */
static struct ark_mmc_obj mmc0_obj =
{
    .id = 0,
    .irq = SDMMC0_IRQn,
    .base = REGS_SDMMC0_BASE,
    .mmc_reset = ark_mmc_reset,
    .softreset_id = softreset_sdmmc,
    .clk_id = CLK_SDMMC0,
};
/* Static descriptor for controller instance 1 (SDMMC1). */
static struct ark_mmc_obj mmc1_obj =
{
    .id = 1,
    .irq = SDMMC1_IRQn,
    .base = REGS_SDMMC1_BASE,
    .mmc_reset = ark_mmc_reset,
    .softreset_id = softreset_sdmmc1,
    .clk_id = CLK_SDMMC1,
};
  1329. int ark_mmc_probe(struct ark_mmc_obj *mmc_obj)
  1330. {
  1331. struct mmc_driver *mmc_drv;
  1332. struct mmcsd_host *host;
  1333. TRACE_DEBUG("%s start\n", __func__);
  1334. mmc_drv = (struct mmc_driver*)pvPortMalloc(sizeof(struct mmc_driver));
  1335. memset(mmc_drv, 0, sizeof(struct mmc_driver));
  1336. mmc_drv->priv = mmc_obj;
  1337. host = mmcsd_alloc_host();
  1338. if (!host)
  1339. {
  1340. printf("ERROR: %s, failed to malloc host\n", __func__);
  1341. return -ENOMEM;
  1342. }
  1343. host->ops = &ark_mmc_ops;
  1344. host->freq_min = MMC_FEQ_MIN;
  1345. host->freq_max = MMC_FEQ_MAX;
  1346. host->valid_ocr = VDD_32_33 | VDD_33_34;
  1347. host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_BUSWIDTH_4;
  1348. host->max_blk_size = 512;
  1349. //fixme: max_blk_count?
  1350. host->max_blk_count = 2048;
  1351. host->private_data = mmc_drv;
  1352. mmc_drv->host = host;
  1353. MMC_Init(mmc_obj);
  1354. if (mmc_obj->use_dma == TRANS_MODE_IDMAC)
  1355. {
  1356. host->max_segs = DESC_RING_BUF_SZ / sizeof(struct idmac_desc);//host->ring_size;
  1357. host->max_blk_size = 65535;
  1358. host->max_seg_size = 0x1000;
  1359. host->max_req_size = host->max_seg_size * host->max_segs;
  1360. host->max_blk_count = host->max_req_size / 512;
  1361. }
  1362. request_irq(mmc_obj->irq, 0, ark_mmc_interrupt, mmc_drv);
  1363. if (mmcsd_dev_is_sdio_card(mmc_obj->id) == 1) {
  1364. ark_mmc_enable_sdio_irq(host, 1);
  1365. }
  1366. if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) {
  1367. if (ark_mmc_get_card_status(host))
  1368. mmcsd_change(host);
  1369. } else {
  1370. mmcsd_change(host);
  1371. }
  1372. TRACE_DEBUG("%s end\n", __func__);
  1373. return 0;
  1374. }
  1375. int mmc_init(void)
  1376. {
  1377. #ifdef SDMMC0_SUPPORT
  1378. ark_mmc_probe(&mmc0_obj);
  1379. #endif
  1380. #ifdef SDMMC1_SUPPORT
  1381. ark_mmc_probe(&mmc1_obj);
  1382. #endif
  1383. return 0;
  1384. }
  1385. #endif