/*
 * dma.c — DMA controller driver (FreeRTOS firmware).
 */
#include "FreeRTOS.h"
#include "chip.h"

/* Number of hardware channels; DMA_BLOCK_SIZE is the maximum transfer
 * count that fits in the [11:0] TransferSize field of a control word. */
#define DMA_CH_NUM 8
#define DMA_BLOCK_SIZE 0xfff

/* Global DMA controller registers (offsets from REGS_DMAC_BASE). */
#define rDMACIntStatus *((volatile unsigned int *)(REGS_DMAC_BASE + 0x000))
#define rDMACIntTCStatus *((volatile unsigned int *)(REGS_DMAC_BASE + 0x004))
#define rDMACIntTCClear *((volatile unsigned int *)(REGS_DMAC_BASE + 0x008))
#define rDMACIntErrorStatus *((volatile unsigned int *)(REGS_DMAC_BASE + 0x00C))
#define rDMACIntErrClr *((volatile unsigned int *)(REGS_DMAC_BASE + 0x010))
#define rDMACRawIntTCStatus *((volatile unsigned int *)(REGS_DMAC_BASE + 0x014))
#define rDMACRawIntErrorStatus *((volatile unsigned int *)(REGS_DMAC_BASE + 0x018))
#define rDMACEnbldChns *((volatile unsigned int *)(REGS_DMAC_BASE + 0x01C))
#define rDMACSoftBReq *((volatile unsigned int *)(REGS_DMAC_BASE + 0x020))
#define rDMACSoftSReq *((volatile unsigned int *)(REGS_DMAC_BASE + 0x024))
#define rDMACSoftLBReq *((volatile unsigned int *)(REGS_DMAC_BASE + 0x028))
#define rDMACSoftLSReq *((volatile unsigned int *)(REGS_DMAC_BASE + 0x02C))
#define rDMACConfiguration *((volatile unsigned int *)(REGS_DMAC_BASE + 0x030))
#define rDMACSync *((volatile unsigned int *)(REGS_DMAC_BASE + 0x034))

/* Per-channel registers: each channel x occupies a 0x20-byte stride
 * starting at offset 0x100. */
#define rDMACCxSrcAddr(x) *((volatile unsigned int *)(REGS_DMAC_BASE + 0x100 + 0x00 + (x)*0x20))
#define rDMACCxDestAddr(x) *((volatile unsigned int *)(REGS_DMAC_BASE + 0x100 + 0x04 + (x)*0x20))
#define rDMACCxLLI(x) *((volatile unsigned int *)(REGS_DMAC_BASE + 0x100 + 0x08 + (x)*0x20))
#define rDMACCxControl(x) *((volatile unsigned int *)(REGS_DMAC_BASE + 0x100 + 0x0C + (x)*0x20))
#define rDMACCxConfiguration(x) *((volatile unsigned int *)(REGS_DMAC_BASE + 0x100 + 0x10 + (x)*0x20))

/* Channel state table, the mutex guarding allocation/release, and the
 * zero-item-size queue used to signal mem-to-mem completion from the ISR. */
static struct dma_chan dma_ch[DMA_CH_NUM] = {0};
static SemaphoreHandle_t dma_mutex;
static QueueHandle_t dma_m2m_done = NULL;
  27. struct dma_chan *dma_request_channel(int favorite_ch)
  28. {
  29. int i;
  30. configASSERT (favorite_ch >= 0 && favorite_ch < DMA_CH_NUM)
  31. xSemaphoreTake(dma_mutex, portMAX_DELAY);
  32. if (!dma_ch[favorite_ch].in_use) {
  33. dma_ch[favorite_ch].chan_id = favorite_ch;
  34. dma_ch[favorite_ch].in_use = 1;
  35. xSemaphoreGive(dma_mutex);
  36. return &dma_ch[favorite_ch];
  37. }
  38. for (i = 0; i < DMA_CH_NUM; i++) {
  39. if (!dma_ch[i].in_use) {
  40. dma_ch[i].chan_id = i;
  41. dma_ch[i].in_use = 1;
  42. xSemaphoreGive(dma_mutex);
  43. return &dma_ch[i];
  44. }
  45. }
  46. xSemaphoreGive(dma_mutex);
  47. return NULL;
  48. }
  49. void dma_release_channel(struct dma_chan *chan)
  50. {
  51. /* This channel is not in use, bail out */
  52. if (!chan->in_use)
  53. return;
  54. dma_stop_channel(chan);
  55. xSemaphoreTake(dma_mutex, portMAX_DELAY);
  56. /* This channel is not in use anymore, free it */
  57. chan->irq_callback = NULL;
  58. chan->callback_param = NULL;
  59. chan->in_use = 0;
  60. xSemaphoreGive(dma_mutex);
  61. }
  62. /*
  63. * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
  64. * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
  65. *
  66. * NOTE: burst size 2 is not supported by controller.
  67. *
  68. * This can be done by finding least significant bit set: n & (n - 1)
  69. */
  70. static void convert_burst(u32 *maxburst)
  71. {
  72. if (*maxburst > 1)
  73. *maxburst = fls(*maxburst) - 2;
  74. else
  75. *maxburst = 0;
  76. }
  77. int dma_config_channel(struct dma_chan *chan, struct dma_config *config)
  78. {
  79. unsigned int ctl;
  80. unsigned int cfg;
  81. unsigned int src_width, dst_width;
  82. unsigned int src_id = 0, dst_id = 0;
  83. unsigned int di = 0, si = 0;
  84. unsigned int data_width = (1 << DMA_BUSWIDTH_4_BYTES);
  85. convert_burst(&config->src_maxburst);
  86. convert_burst(&config->dst_maxburst);
  87. if (config->direction == DMA_MEM_TO_DEV) {
  88. src_width = __ffs(data_width | config->src_addr | config->transfer_size);
  89. dst_width = config->dst_addr_width;
  90. dst_id = config->dst_id;
  91. si = 1;
  92. } else if (config->direction == DMA_DEV_TO_MEM) {
  93. src_width = config->src_addr_width;
  94. dst_width = __ffs(data_width | config->dst_addr | config->transfer_size);
  95. src_id = config->src_id;
  96. di = 1;
  97. } else if (config->direction == DMA_MEM_TO_MEM) {
  98. src_width = __ffs(data_width | config->src_addr | config->transfer_size);
  99. dst_width = __ffs(data_width | config->dst_addr | config->transfer_size);
  100. si = 1;
  101. di = 1;
  102. }
  103. ctl = (1 << 31) | /* [31] I Read/write Terminal count interrupt enable bit */
  104. (0 << 28) | /* [30:28] Prot Read/write Protection */
  105. (di << 27) | /* [27] DI Read/write Destination increment */
  106. (si << 26) | /* [26] SI Read/write Source increment */
  107. (config->dst_master_id << 25) | /* [25] D Read/write Destination AHB master select */
  108. (config->src_master_id << 24) | /* [24] S Read/write Source AHB master select */
  109. (dst_width << 21) | /* [23:21] DWidth Read/write Destination transfer width */
  110. (src_width << 18) | /* [20:18] SWidth Read/write Source transfer width */
  111. (config->dst_maxburst << 15) | /* [17:15] DBSize Read/write Destination burst size */
  112. (config->src_maxburst << 12) | /* [14:12] SBSize Read/write Source burst size */
  113. 0; /* [11:0] TransferSize Read/write Transfer size */
  114. cfg = (0 << 18) | /* [18] H Read/write Halt */
  115. (0 << 16) | /* [16] L Read/write Lock */
  116. (1 << 15) | /* [15] ITC Read/write Terminal count interrupt mask */
  117. (1 << 14) | /* [14] IE Read/write Interrupt error mask */
  118. (config->direction << 11) | /* [13:11] FlowCntrl Read/write Flow control and transfer type */
  119. (dst_id << 6) | /* [9:6] DestPeripheral Read/write Destination peripheral */
  120. (src_id << 1) | /* [4:1] SrcPeripheral Read/write Source peripheral */
  121. 0; /* [0] Channel enable */
  122. if ((config->transfer_size >> src_width) > DMA_BLOCK_SIZE) {
  123. unsigned int blk_size = config->transfer_size >> src_width;
  124. int lli_num;
  125. int i;
  126. lli_num = (blk_size + DMA_BLOCK_SIZE - 1) / DMA_BLOCK_SIZE - 1;
  127. if (chan->lli) {
  128. vPortFree(chan->lli);
  129. chan->lli = NULL;
  130. }
  131. chan->lli = pvPortMalloc(sizeof(struct dma_lli) * lli_num);
  132. if (!chan->lli)
  133. return -ENOMEM;
  134. for (i = 0; i < lli_num - 1; i++) {
  135. chan->lli[i].src_addr = config->src_addr + (si ? (i + 1) : 0) * (DMA_BLOCK_SIZE << src_width);
  136. chan->lli[i].dst_addr = config->dst_addr + (di ? (i + 1) : 0) * (DMA_BLOCK_SIZE << src_width);
  137. chan->lli[i].next_lli = (unsigned int)&chan->lli[i + 1];
  138. chan->lli[i].control = ctl | DMA_BLOCK_SIZE;
  139. if (!config->blkint_en)
  140. chan->lli[i].control &= ~(1 << 31);
  141. }
  142. chan->lli[i].src_addr = config->src_addr + (si ? (i + 1) : 0) * (DMA_BLOCK_SIZE << src_width);
  143. chan->lli[i].dst_addr = config->dst_addr + (di ? (i + 1) : 0) * (DMA_BLOCK_SIZE << src_width);
  144. chan->lli[i].next_lli = 0;
  145. chan->lli[i].control = ctl | (blk_size - DMA_BLOCK_SIZE * lli_num);
  146. dma_flush_range((uint32_t)chan->lli, (uint32_t)chan->lli + sizeof(struct dma_lli) * lli_num);
  147. rDMACCxSrcAddr(chan->chan_id) = config->src_addr;
  148. rDMACCxDestAddr(chan->chan_id) = config->dst_addr;
  149. rDMACCxLLI(chan->chan_id) = (unsigned int)chan->lli | 1;
  150. rDMACCxControl(chan->chan_id) = ctl & ~(1 << 31) | DMA_BLOCK_SIZE;
  151. rDMACCxConfiguration(chan->chan_id) = cfg;
  152. } else {
  153. rDMACCxSrcAddr(chan->chan_id) = config->src_addr;
  154. rDMACCxDestAddr(chan->chan_id) = config->dst_addr;
  155. rDMACCxLLI(chan->chan_id) = 0;
  156. rDMACCxControl(chan->chan_id) = ctl | (config->transfer_size >> src_width);
  157. rDMACCxConfiguration(chan->chan_id) = cfg;
  158. }
  159. return 0;
  160. }
  161. int dma_config_cylic_channel(struct dma_chan *chan, struct dma_config *config, int num)
  162. {
  163. unsigned int ctl;
  164. unsigned int cfg;
  165. unsigned int src_width, dst_width;
  166. unsigned int src_id = 0, dst_id = 0;
  167. unsigned int di = 0, si = 0;
  168. unsigned int src_data_width = (1 << config->src_addr_width);//(1 << DMA_BUSWIDTH_4_BYTES);
  169. unsigned int dst_data_width = (1 << config->dst_addr_width);//(1 << DMA_BUSWIDTH_4_BYTES);
  170. int i;
  171. if (chan->lli) {
  172. vPortFree(chan->lli);
  173. chan->lli = NULL;
  174. }
  175. chan->lli = pvPortMalloc(sizeof(struct dma_lli) * num);
  176. if (!chan->lli)
  177. return -ENOMEM;
  178. for (i = 0; i < num; i++) {
  179. convert_burst(&config->src_maxburst);
  180. convert_burst(&config->dst_maxburst);
  181. if (config->direction == DMA_MEM_TO_DEV) {
  182. src_width = __ffs(src_data_width | config->src_addr | config->transfer_size);
  183. dst_width = config->dst_addr_width;
  184. dst_id = config->dst_id;
  185. si = 1;
  186. } else if (config->direction == DMA_DEV_TO_MEM) {
  187. src_width = config->src_addr_width;
  188. dst_width = __ffs(dst_data_width | config->dst_addr | config->transfer_size);
  189. src_id = config->src_id;
  190. di = 1;
  191. } else if (config->direction == DMA_MEM_TO_MEM) {
  192. src_width = __ffs(src_data_width | config->src_addr | config->transfer_size);
  193. dst_width = __ffs(dst_data_width | config->dst_addr | config->transfer_size);
  194. si = 1;
  195. di = 1;
  196. }
  197. ctl = (1 << 31) | /* [31] I Read/write Terminal count interrupt enable bit */
  198. (0 << 28) | /* [30:28] Prot Read/write Protection */
  199. (di << 27) | /* [27] DI Read/write Destination increment */
  200. (si << 26) | /* [26] SI Read/write Source increment */
  201. (0 << 25) | /* [25] D Read/write Destination AHB master select */
  202. (1 << 24) | /* [24] S Read/write Source AHB master select */
  203. (dst_width << 21) | /* [23:21] DWidth Read/write Destination transfer width */
  204. (src_width << 18) | /* [20:18] SWidth Read/write Source transfer width */
  205. (config->dst_maxburst << 15) | /* [17:15] DBSize Read/write Destination burst size */
  206. (config->src_maxburst << 12) | /* [14:12] SBSize Read/write Source burst size */
  207. 0; /* [11:0] TransferSize Read/write Transfer size */
  208. cfg = (0 << 18) | /* [18] H Read/write Halt */
  209. (0 << 16) | /* [16] L Read/write Lock */
  210. (1 << 15) | /* [15] ITC Read/write Terminal count interrupt mask */
  211. (1 << 14) | /* [14] IE Read/write Interrupt error mask */
  212. (config->direction << 11) | /* [13:11] FlowCntrl Read/write Flow control and transfer type */
  213. (dst_id << 6) | /* [9:6] DestPeripheral Read/write Destination peripheral */
  214. (src_id << 1) | /* [4:1] SrcPeripheral Read/write Source peripheral */
  215. 0; /* [0] Channel enable */
  216. chan->lli[i].src_addr = config->src_addr;
  217. chan->lli[i].dst_addr = config->dst_addr;
  218. chan->lli[i].next_lli = (unsigned int)&chan->lli[i + 1];
  219. chan->lli[i].control = ctl | (config->transfer_size >> src_width);
  220. if (!config->blkint_en)
  221. chan->lli[i].control &= ~(1 << 31);
  222. config++;
  223. }
  224. chan->lli[i - 1].next_lli = (unsigned int)&chan->lli[0];
  225. CP15_clean_dcache_for_dma((unsigned int)chan->lli,
  226. (unsigned int)chan->lli + sizeof(struct dma_lli) * num);
  227. rDMACCxSrcAddr(chan->chan_id) = 0;
  228. rDMACCxDestAddr(chan->chan_id) = 0;
  229. rDMACCxLLI(chan->chan_id) = (unsigned int)chan->lli | 1;
  230. rDMACCxControl(chan->chan_id) = ctl & ~(1 << 31) | 1;
  231. rDMACCxConfiguration(chan->chan_id) = cfg;
  232. return 0;
  233. }
  234. int dma_register_complete_callback(struct dma_chan *chan,
  235. void (*callback)(void *param, unsigned int mask),
  236. void *callback_param)
  237. {
  238. chan->irq_callback = callback;
  239. chan->callback_param = callback_param;
  240. return 0;
  241. }
  242. int dma_start_channel(struct dma_chan *chan)
  243. {
  244. configASSERT(chan && chan->chan_id < DMA_CH_NUM);
  245. rDMACCxConfiguration(chan->chan_id) |= (1 << 0);
  246. return 0;
  247. }
  248. int dma_stop_channel(struct dma_chan *chan)
  249. {
  250. unsigned int timeout = xTaskGetTickCount() + 1000;
  251. configASSERT(chan && chan->chan_id < DMA_CH_NUM);
  252. xSemaphoreTake(dma_mutex, portMAX_DELAY);
  253. if(!(rDMACEnbldChns & (1 << chan->chan_id))) {
  254. xSemaphoreGive(dma_mutex);
  255. return 0;
  256. }
  257. // A channel can be disabled by clearing the Enable bit.
  258. rDMACCxConfiguration(chan->chan_id) &= ~(1 << 0);
  259. // waiting
  260. while(rDMACEnbldChns & (1 << chan->chan_id)) {
  261. if(xTaskGetTickCount() >= timeout) {
  262. printf ("dma_stop_channel %d timeout\n", chan->chan_id);
  263. xSemaphoreGive(dma_mutex);
  264. return -1;
  265. }
  266. vTaskDelay(pdMS_TO_TICKS(10));
  267. }
  268. if (chan->lli) {
  269. vPortFree(chan->lli);
  270. chan->lli = NULL;
  271. }
  272. xSemaphoreGive(dma_mutex);
  273. return 0;
  274. }
  275. static void dma_m2m_callback(void *param, unsigned int mask)
  276. {
  277. if(dma_m2m_done)
  278. xQueueSendFromISR(dma_m2m_done, NULL, 0);
  279. }
  280. int dma_m2mcpy(unsigned int dst_addr, unsigned int src_addr, int size)
  281. {
  282. struct dma_config cfg = {0};
  283. int ret = -1;
  284. struct dma_chan *dma_ch = dma_request_channel(0);
  285. if (!dma_ch) {
  286. printf("%s() dma_request_channel fail.\n", __func__);
  287. return -1;
  288. }
  289. cfg.dst_addr_width = DMA_BUSWIDTH_4_BYTES;
  290. cfg.dst_maxburst = 256;
  291. cfg.src_addr_width = DMA_BUSWIDTH_4_BYTES;
  292. cfg.src_maxburst = 256;
  293. cfg.transfer_size = size;
  294. cfg.src_addr = src_addr;
  295. cfg.dst_addr = dst_addr;
  296. cfg.direction = DMA_MEM_TO_MEM;
  297. cfg.dst_master_id = 0;
  298. cfg.src_master_id = 1;
  299. dma_clean_range(src_addr, src_addr + size);
  300. dma_inv_range(dst_addr, dst_addr + size);
  301. ret = dma_config_channel(dma_ch, &cfg);
  302. if (ret) {
  303. printf("%s, dma_config_channel failed.\n", __func__);
  304. goto exit;
  305. }
  306. dma_register_complete_callback(dma_ch, dma_m2m_callback, NULL);
  307. xQueueReset(dma_m2m_done);
  308. dma_start_channel(dma_ch);
  309. if (xQueueReceive(dma_m2m_done, NULL, pdMS_TO_TICKS(3000)) != pdTRUE) {
  310. printf("dma_m2mcpy wait timeout.\n");
  311. ret = -ETIMEDOUT;
  312. goto exit;
  313. }
  314. dma_stop_channel(dma_ch);
  315. ret = 0;
  316. exit:
  317. if(dma_ch)
  318. dma_release_channel(dma_ch);
  319. return ret;
  320. }
  321. static void dma_int_handler(void *param)
  322. {
  323. unsigned int err_status, tfr_status;
  324. struct dma_chan *chan;
  325. unsigned int irqmask = 0;
  326. int i;
  327. err_status = rDMACIntErrorStatus;
  328. tfr_status = rDMACIntTCStatus;
  329. rDMACIntTCClear = tfr_status;
  330. rDMACIntErrClr = err_status;
  331. for(i= 0; i< DMA_CH_NUM; i++) {
  332. irqmask = 0;
  333. if (err_status & (1 << i)) {
  334. irqmask |= DMA_INT_ERR;
  335. }
  336. if (tfr_status & (1 << i)) {
  337. irqmask |= DMA_INT_TC;
  338. }
  339. if (!irqmask)
  340. continue;
  341. chan = &dma_ch[i];
  342. if (chan->irq_callback)
  343. chan->irq_callback(chan->callback_param, irqmask);
  344. }
  345. }
  346. int dma_init(void)
  347. {
  348. dma_mutex = xSemaphoreCreateMutex();
  349. dma_m2m_done = xQueueCreate(1, 0);
  350. sys_soft_reset(softreset_dma);
  351. request_irq(DMA_IRQn, 0, dma_int_handler, NULL);
  352. /* Clear all interrupts on all channels. */
  353. rDMACIntTCClear = 0xff;
  354. rDMACIntErrClr = 0xff;
  355. rDMACConfiguration |= (1<<0); // [0] E Read/write PrimeCell DMAC enable
  356. return 0;
  357. }