#include "FreeRTOS.h"
#include "chip.h"
#include "board.h"
#include "mmc.h"
#include "sdio.h"
#include "sdmmc.h"
#include "mmcsd_core.h"
#include "os_adapt.h"
#include "sema.h"

#ifdef SDMMC_SUPPORT

/* Maximum payload (bytes) a single IDMAC descriptor buffer can carry. */
#define DW_MCI_DESC_DATA_LENGTH 0x1000

/* 32-bit internal DMA controller (IDMAC) descriptor, chained mode. */
struct idmac_desc {
	__le32 des0; /* Control Descriptor */
#define IDMAC_DES0_DIC BIT(1)
#define IDMAC_DES0_LD BIT(2)
#define IDMAC_DES0_FD BIT(3)
#define IDMAC_DES0_CH BIT(4)
#define IDMAC_DES0_ER BIT(5)
#define IDMAC_DES0_CES BIT(30)
#define IDMAC_DES0_OWN BIT(31)

	__le32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32 des2; /* buffer 1 physical address */
	__le32 des3; /* buffer 2 physical address */
};

/* FIFO fill level in 32-bit words, from STATUS[29:17]. */
static inline uint32_t MMC_GetWaterlevel(struct ark_mmc_obj *mmc_obj)
{
	return (readl(mmc_obj->base + SDMMC_STATUS) >> 17) & 0x1fff;
}

static inline uint32_t MMC_GetStatus(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_STATUS);
}

static inline uint32_t MMC_GetRawInterrupt(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_RINTSTS);
}

static inline uint32_t MMC_GetUnmaskedInterrupt(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_MINTSTS);
}

/* Acknowledge (clear) bits in the raw interrupt status register. */
static inline uint32_t MMC_ClearRawInterrupt(struct ark_mmc_obj *mmc_obj, uint32_t interrupts)
{
	return writel(interrupts, mmc_obj->base + SDMMC_RINTSTS);
}

static inline uint32_t MMC_GetInterruptMask(struct ark_mmc_obj *mmc_obj)
{
	return readl(mmc_obj->base + SDMMC_INTMASK);
}

static inline uint32_t MMC_SetInterruptMask(struct ark_mmc_obj *mmc_obj, uint32_t mask)
{
	return writel(mask, mmc_obj->base + SDMMC_INTMASK);
}

static inline void MMC_SetByteCount(struct ark_mmc_obj *mmc_obj, uint32_t bytes)
{
	writel(bytes, mmc_obj->base + SDMMC_BYTCNT);
}

static inline void MMC_SetBlockSize(struct ark_mmc_obj *mmc_obj, uint32_t size)
{
	writel(size, mmc_obj->base + SDMMC_BLKSIZ);
}

/* Read short/long response word resp_num (0..3) from RESP0..RESP3. */
static inline uint32_t MMC_GetResponse(struct ark_mmc_obj *mmc_obj, int resp_num)
{
	return readl(mmc_obj->base + SDMMC_RESP0 + resp_num * 4);
}

static inline uint32_t MMC_IsFifoEmpty(struct ark_mmc_obj *mmc_obj)
{
	return (readl(mmc_obj->base + SDMMC_STATUS) >> 2) & 0x1;
}

/*
static inline uint32_t MMC_IsDataStateBusy(struct ark_mmc_obj *mmc_obj)
{
	return (readl(mmc_obj->base + SDMMC_STATUS) >> 10) & 0x1;
}
*/

/*
 * Reprogram the card clock divider using the standard dw_mmc
 * "disable clock -> set divider -> enable clock" sequence, informing the
 * CIU at each step via an update-clocks-only command (START|UPD_CLK).
 * Returns 0 on success, -1 on card removal or timeout (~100ms budget).
 */
int MMC_UpdateClockRegister(struct ark_mmc_obj *mmc_obj, int div)
{
	uint32_t tick, timeout;
	int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id);

	tick = xTaskGetTickCount();
	timeout = tick + configTICK_RATE_HZ / 10; //100ms in total

	/* disable clock */
	writel(0, mmc_obj->base + SDMMC_CLKENA);
	writel(0, mmc_obj->base + SDMMC_CLKSRC);
	/* inform CIU */
	writel(0, mmc_obj->base + SDMMC_CMDARG);
	wmb(); /* drain writebuffer */
	writel(1 << 31 | 1 << 21 | 1 << 13, mmc_obj->base + SDMMC_CMD);
	while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
			printf("ERROR: %s, CARD out\n", __func__);
			return -1;
		}
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			printf("ERROR: %s, update clock timeout\n", __func__);
			return -1;
		}
	}

	/* set clock to desired speed */
	writel(div, mmc_obj->base + SDMMC_CLKDIV);
	/* inform CIU */
	writel(0, mmc_obj->base + SDMMC_CMDARG);
	wmb(); /* drain writebuffer */
	writel(1 << 31 | 1 << 21 | 1 << 13, mmc_obj->base + SDMMC_CMD);
	while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
			printf("ERROR: %s, CARD out\n", __func__);
			return -1;
		}
		tick = xTaskGetTickCount();
		if (tick > timeout) {
			TRACE_DEBUG("ERROR: %s, update clock timeout\n", __func__);
			return -1;
		}
	}

	/* enable clock */
	writel(0x10001, mmc_obj->base + SDMMC_CLKENA); //low power
	/* inform CIU */
	writel(1 << 31 | 1 << 21, mmc_obj->base + SDMMC_CMD);
	while (readl(mmc_obj->base + SDMMC_CMD) & 0x80000000) {
		if ((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) {
			printf("ERROR: %s, CARD out\n", __func__);
return -1; } tick = xTaskGetTickCount(); if(tick > timeout) { printf("ERROR: %s, update clock timeout\n", __func__); return -1; } } return 0; } int MMC_SetCardWidth(struct ark_mmc_obj *mmc_obj, int width) { switch(width) { case SDMMC_CTYPE_1BIT: writel(0, mmc_obj->base + SDMMC_CTYPE); break; case SDMMC_CTYPE_4BIT: writel(1, mmc_obj->base + SDMMC_CTYPE); break; default: printf("ERROR: %s, card width %d is not supported\n", __func__, width); return -1; break; } return 0; } int MMC_SendCommand(struct ark_mmc_obj *mmc_obj, uint32_t cmd, uint32_t arg, uint32_t flags) { uint32_t tick, timeout; tick = xTaskGetTickCount(); timeout = tick + configTICK_RATE_HZ; //1s writel(arg, mmc_obj->base + SDMMC_CMDARG); flags |= 1<<31 | 1<<29 | cmd; writel(flags, mmc_obj->base + SDMMC_CMD); while(readl(mmc_obj->base + SDMMC_CMD) & SDMMC_CMD_START) { if((mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) { printf("ERROR: %s,SDMMC_CMD BackWard Read , Card Out\n", __func__); return -1; } tick = xTaskGetTickCount(); if(tick > timeout) { printf("ERROR: %s, send cmd timeout\n", __func__); return -1; } } //fixme: check HLE_INT_STATUS return 0; } int MMC_ResetFifo(struct ark_mmc_obj *mmc_obj) { uint32_t reg, tick, timeout; tick = xTaskGetTickCount(); timeout = tick + configTICK_RATE_HZ / 10; //100ms reg = readl(mmc_obj->base + SDMMC_CTRL); reg |= 1 << 1; writel(reg, mmc_obj->base + SDMMC_CTRL); //wait until fifo reset finish while(readl(mmc_obj->base + SDMMC_CTRL) & SDMMC_CTRL_FIFO_RESET) { tick = xTaskGetTickCount(); if(tick > timeout) { printf("ERROR: %s, FIFO reset timeout\n", __func__); return -1; } } return 0; } int MMC_Reset(struct ark_mmc_obj *mmc_obj) { uint32_t reg, tick, timeout; // tick = xTaskGetTickCount(); // timeout = tick + configTICK_RATE_HZ / 10; //100ms reg = readl(mmc_obj->base + SDMMC_CTRL); reg |= SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET; writel(reg, mmc_obj->base + SDMMC_CTRL); tick = 
xTaskGetTickCount(); timeout = tick + configTICK_RATE_HZ / 10; //100ms while(readl(mmc_obj->base + SDMMC_CTRL) & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET)) { tick = xTaskGetTickCount(); if(tick > timeout) { printf("ERROR: %s, CTRL dma|fifo|ctrl reset timeout\n", __func__); return -1; } } return 0; } #define DW_MCI_DMA_THRESHOLD 32 /* DMA interface functions */ static void dw_mci_stop_dma(struct ark_mmc_obj *mmc_obj) { if (mmc_obj->using_dma) { mmc_obj->dma_ops->stop(mmc_obj); mmc_obj->dma_ops->cleanup(mmc_obj); } /* Data transfer was stopped by the interrupt handler */ //set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj); static void dw_mci_dmac_complete_callback(void *param, unsigned int mask) { struct ark_mmc_obj *mmc_obj = param; //struct mmcsd_data *data = mmc_obj->data; dev_vdbg(mmc_obj->dev, "DMA complete\n"); mmc_obj->dma_ops->cleanup(mmc_obj); xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0); /* * If the card was removed, data will be NULL. No point in trying to * send the stop command or waiting for NBUSY in this case. 
*/ /* if (data) { set_bit(EVENT_XFER_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); } */ } static UINT32 WaiteforDmaResetClear(UINT32 addr, UINT32 __delay_us, UINT32 timeout_us) { u64 __timeout_us = (timeout_us); UINT32 tmp,val; while(__timeout_us){ val = readl(addr); tmp = (val>>2)& 0x1 ; if(tmp == 0) return 0; udelay(__delay_us); } return 1; } static UINT32 WaiteforDmaOwnClear(u32 addr, UINT32 __delay_us, UINT32 timeout_us) { u64 __timeout_us = (timeout_us); UINT32 tmp,val; while(__timeout_us){ val = readl(addr); tmp = (val>>31)& 0x1 ; if(tmp == 0) return 0; udelay(__delay_us); __timeout_us-- ; } return 1; } static bool dw_mci_ctrl_reset(struct ark_mmc_obj *mmc_obj, u32 reset) { u32 ctrl; ctrl = readl(mmc_obj->base + SDMMC_CTRL); ctrl |= reset; writel( ctrl, mmc_obj->base + SDMMC_CTRL); /* wait till resets clear */ if (WaiteforDmaResetClear(mmc_obj->base + SDMMC_CTRL,1, 500 * USEC_PER_MSEC)) { printf("Timeout resetting block (ctrl reset %#x)\n", readl(mmc_obj->base + SDMMC_CTRL)); return false; } return true; } static void dw_mci_idmac_reset(struct ark_mmc_obj *mmc_obj) { u32 bmod = readl(mmc_obj->base + SDMMC_BMOD); /* Software reset of DMA */ bmod |= SDMMC_IDMAC_SWRESET; writel(bmod, mmc_obj->base + SDMMC_BMOD); } static int dw_mci_idmac_init(struct ark_mmc_obj *mmc_obj) { int i; unsigned int ring_size =0; if (mmc_obj->dma_64bit_address == 1) { #if 0 struct idmac_desc_64addr *p; /* Number of descriptors in the ring buffer */ ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); /* Forward link the descriptor list */ for (i = 0, p = mmc_obj->sg_cpu; i < ring_size - 1; i++, p++) { p->des6 = (mmc_obj->sg_dma + (sizeof(struct idmac_desc_64addr) * (i + 1))) & 0xffffffff; p->des7 = (u64)(mmc_obj->sg_dma + (sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32; /* Initialize reserved and buffer size fields to "0" */ p->des0 = 0; p->des1 = 0; p->des2 = 0; p->des3 = 0; } /* Set the last descriptor as the end-of-ring descriptor */ p->des6 = 
mmc_obj->sg_dma & 0xffffffff; p->des7 = (u64)mmc_obj->sg_dma >> 32; p->des0 = IDMAC_DES0_ER; #endif } else { struct idmac_desc *p; /* Number of descriptors in the ring buffer */ ring_size = DESC_RING_BUF_SZ / sizeof(struct idmac_desc); /* Forward link the descriptor list */ for (i = 0, p = mmc_obj->sg_cpu; i < ring_size - 1; i++, p++) { p->des3 = cpu_to_le32(mmc_obj->sg_dma + (sizeof(struct idmac_desc) * (i + 1))); p->des0 = 0; p->des1 = 0; } /* Set the last descriptor as the end-of-ring descriptor */ p->des3 = cpu_to_le32(mmc_obj->sg_dma); p->des0 = cpu_to_le32(IDMAC_DES0_ER); } dw_mci_idmac_reset(mmc_obj); if (mmc_obj->dma_64bit_address == 1) { /* Mask out interrupts - get Tx & Rx complete only */ writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS64); writel(SDMMC_IDMAC_INT_NI|SDMMC_IDMAC_INT_RI|SDMMC_IDMAC_INT_TI,mmc_obj->base + SDMMC_IDINTEN64); /* Set the descriptor base address */ writel(mmc_obj->sg_dma & 0xffffffff, mmc_obj->base + SDMMC_DBADDRL); writel((u64)mmc_obj->sg_dma >> 32, mmc_obj->base + SDMMC_DBADDRU); } else { /* Mask out interrupts - get Tx & Rx complete only */ writel(IDMAC_INT_CLR, mmc_obj->base + SDMMC_IDSTS); writel(SDMMC_IDMAC_INT_NI|SDMMC_IDMAC_INT_RI|SDMMC_IDMAC_INT_TI, mmc_obj->base + SDMMC_IDINTEN); /* Set the descriptor base address */ writel(mmc_obj->sg_dma, mmc_obj->base + SDMMC_DBADDR); } return 0; } static inline int dw_mci_prepare_desc32(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data) { unsigned int desc_len; struct idmac_desc *desc_first, *desc_last, *desc; u32 dma_address; int i; unsigned int sg_len; u32 mem_addr; unsigned int length; unsigned int leftsize; dma_address = (u32)data->buf; if (dma_address & (ARCH_DMA_MINALIGN - 1)) { if (data->flags & DATA_DIR_WRITE) { mmc_obj->tx_dummy_buffer = pvPortMalloc(data->blks * data->blksize); if (!mmc_obj->tx_dummy_buffer) return -ENOMEM; memcpy(mmc_obj->tx_dummy_buffer, data->buf, data->blks * data->blksize); dma_address = (u32)mmc_obj->tx_dummy_buffer; } else if (data->flags & 
DATA_DIR_READ) { mmc_obj->rx_dummy_buffer = pvPortMalloc(data->blks * data->blksize); if (!mmc_obj->rx_dummy_buffer) return -ENOMEM; dma_address = (u32)mmc_obj->rx_dummy_buffer; } mmc_obj->dummy_buffer_used = 1; } else { mmc_obj->dummy_buffer_used = 0; } desc_first = desc_last = desc = mmc_obj->sg_cpu; leftsize = data->blks * data->blksize; //sg_len = data->blks * data->blksize; if(data->blks * data->blksize < DW_MCI_DESC_DATA_LENGTH){ sg_len = 1; length = data->blks * data->blksize; } else{ sg_len = (data->blks * data->blksize % DW_MCI_DESC_DATA_LENGTH)? ((data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH) + 1): data->blks * data->blksize / DW_MCI_DESC_DATA_LENGTH; length = DW_MCI_DESC_DATA_LENGTH; } mem_addr = VIRT_TO_PHY(dma_address);//PHY_TO_UNCACHED_VIRT(dma_address);//dma_address;//(u32)mmc_obj->sg_dma;//sg_dma_address(&data->sg[i]); //data->blks * data->blksize;//data->blksize;//0x200;//sg_dma_len(&data->sg[i]); for(i = 0; i < sg_len; i++){ if(sg_len > 1){ length = (leftsize>= DW_MCI_DESC_DATA_LENGTH) ? DW_MCI_DESC_DATA_LENGTH : leftsize; if( length == 0) break; } for ( ; length ; desc++) { desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? length : DW_MCI_DESC_DATA_LENGTH; length -= desc_len; leftsize -= desc_len; /* * Wait for the former clear OWN bit operation * of IDMAC to make sure that this descriptor * isn't still owned by IDMAC as IDMAC's write * ops and CPU's read ops are asynchronous. 
*/ if(WaiteforDmaOwnClear((UINT32)(&desc->des0),10, 10)) { printf(">>>>>IDMA OWN TIME OUT!!!\n"); goto err_own_bit; } /* * Set the OWN bit and disable interrupts * for this descriptor */ desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH); /* Buffer length */ IDMAC_SET_BUFFER1_SIZE(desc, desc_len); /* Physical address to DMA to/from */ desc->des2 = cpu_to_le32(mem_addr); /* Update physical address for the next desc */ mem_addr += desc_len; /* Save pointer to the last descriptor */ desc_last = desc; } } /* Set first descriptor */ desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); /* Set last descriptor */ desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC)); desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); mmc_obj->data = data; return 0; err_own_bit: /* restore the descriptor chain as it's polluted */ dev_dbg("descriptor is still owned by IDMAC.\n"); memset(mmc_obj->sg_cpu, 0, DESC_RING_BUF_SZ); dw_mci_idmac_init(mmc_obj); return -EINVAL; } static int dw_mci_idmac_start_dma(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data) { u32 temp; int ret = 0; #if 1 if (mmc_obj->dma_64bit_address == 1) ;//ret = dw_mci_prepare_desc64(host, mmc_obj->data, sg_len); else ret = dw_mci_prepare_desc32(mmc_obj, data); if (ret) goto out; #endif /* drain writebuffer */ wmb(); /* Make sure to reset DMA in case we did PIO before this */ dw_mci_ctrl_reset(mmc_obj, SDMMC_CTRL_DMA_RESET); dw_mci_idmac_reset(mmc_obj); /* Select IDMAC interface */ temp = readl( mmc_obj->base + SDMMC_CTRL); temp |= SDMMC_CTRL_USE_IDMAC; writel(temp, mmc_obj->base + SDMMC_CTRL); /* drain writebuffer */ wmb(); /* Enable the IDMAC */ temp = readl(mmc_obj->base + SDMMC_BMOD); temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; writel(temp, mmc_obj->base + SDMMC_BMOD); #if 0 /* Flush cache before write */ if (data->flags & DATA_DIR_WRITE) { CP15_clean_dcache_for_dma((u32)data->buf, (u32)data->buf + data->blks * data->blksize); //printf(">>>>>>>>Write clean cache>>>>>>\n"); } /* Invalidate 
cache before read */ else if (data->flags & DATA_DIR_READ) { CP15_flush_dcache_for_dma((u32)data->buf, (u32)data->buf + data->blks * data->blksize); //printf(">>>>>>>>Read flush cache>>>>>>\n"); } #else if (mmc_obj->using_dma) { if (mmc_obj->dummy_buffer_used) { if (data->flags & DATA_DIR_WRITE) { if (mmc_obj->tx_dummy_buffer) CP15_clean_dcache_for_dma((u32)mmc_obj->tx_dummy_buffer, (u32)mmc_obj->tx_dummy_buffer + data->blks * data->blksize); } else if (data->flags & DATA_DIR_READ) { if (mmc_obj->rx_dummy_buffer) CP15_flush_dcache_for_dma((u32)mmc_obj->rx_dummy_buffer, (u32)mmc_obj->rx_dummy_buffer + data->blks * data->blksize); } } else { /* Flush cache before write */ if (data->flags & DATA_DIR_WRITE) { CP15_clean_dcache_for_dma((u32)data->buf, (u32)data->buf + data->blks * data->blksize); //printf(">>>>>>>>Write clean cache>>>>>>\n"); } /* Invalidate cache before read */ else if (data->flags & DATA_DIR_READ) { CP15_flush_dcache_for_dma((u32)data->buf, (u32)data->buf + data->blks * data->blksize); //printf(">>>>>>>>Read flush cache>>>>>>\n"); } } } #endif /* Start it running */ writel(1, mmc_obj->base + SDMMC_PLDMND); out: return ret; } static void dw_mci_idmac_stop_dma(struct ark_mmc_obj *mmc_obj) { u32 temp; /* Disable and reset the IDMAC interface */ temp = readl(mmc_obj->base+SDMMC_CTRL); temp &= ~SDMMC_CTRL_USE_IDMAC; temp |= SDMMC_CTRL_DMA_RESET; writel(temp,mmc_obj->base+SDMMC_CTRL); /* Stop the IDMAC running */ temp = readl(mmc_obj->base+SDMMC_BMOD); temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); temp |= SDMMC_IDMAC_SWRESET; writel(temp,mmc_obj->base+SDMMC_BMOD); } void dw_mci_dmac_complete_dma(void *arg) { #if 0 struct dw_mci *host = arg; struct mmc_data *data = host->data; dev_vdbg(host->dev, "DMA complete\n"); if ((host->use_dma == TRANS_MODE_EDMAC) && data && (data->flags & MMC_DATA_READ)) /* Invalidate cache after read */ dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc), data->sg, data->sg_len, DMA_FROM_DEVICE); host->dma_ops->cleanup(host); /* * If the 
card was removed, data will be NULL. No point in trying to * send the stop command or waiting for NBUSY in this case. */ if (data) { set_bit(EVENT_XFER_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); } #endif return; } static void dw_mci_dma_cleanup(struct ark_mmc_obj *mmc_obj) { return; if(mmc_obj->use_dma == TRANS_MODE_IDMAC) { void *data = mmc_obj->sg_cpu; if (data) { free(data); } } } static struct dw_mci_dma_ops dw_mci_idmac_ops = { .init = dw_mci_idmac_init, .start = dw_mci_idmac_start_dma, .stop = dw_mci_idmac_stop_dma, // .complete = dw_mci_dmac_complete_dma, .cleanup = dw_mci_dma_cleanup, }; static void dw_mci_init_dma(struct ark_mmc_obj *mmc_obj) { unsigned int addr_config = 0; /* * Check tansfer mode from HCON[17:16] * Clear the ambiguous description of dw_mmc databook: * 2b'00: No DMA Interface -> Actually means using Internal DMA block * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block * 2b'11: Non DW DMA Interface -> pio only * Compared to DesignWare DMA Interface, Generic DMA Interface has a * simpler request/acknowledge handshake mechanism and both of them * are regarded as external dma master for dw_mmc. 
*/ mmc_obj->use_dma = SDMMC_GET_TRANS_MODE(readl(mmc_obj->base + SDMMC_HCON)); //mmc_obj->use_dma = TRANS_MODE_IDMAC; if (mmc_obj->use_dma == DMA_INTERFACE_IDMA) { mmc_obj->use_dma = TRANS_MODE_IDMAC; } else { goto no_dma; } /* Determine which DMA interface to use */ /* * Check ADDR_CONFIG bit in HCON to find * IDMAC address bus width */ addr_config = SDMMC_GET_ADDR_CONFIG(readl(mmc_obj->base + SDMMC_HCON)); if (addr_config == 1) { ;/* host supports IDMAC in 64-bit address mode */ } else { /* host supports IDMAC in 32-bit address mode */ mmc_obj->dma_64bit_address = 0; dev_info(mmc_obj->dev, "IDMAC supports 32-bit address mode.\n"); } /* Alloc memory for sg translation */ mmc_obj->sg_cpu =(void *)(dma_addr_t)pvPortMalloc(DESC_RING_BUF_SZ + ARCH_DMA_MINALIGN); if (!mmc_obj->sg_cpu) { printf("%s: could not alloc DMA memory\n", __func__); goto no_dma; } mmc_obj->sg_dma = VIRT_TO_PHY((u32)mmc_obj->sg_cpu);//PHY_TO_UNCACHED_VIRT((u32)mmc_obj->sg_cpu); mmc_obj->sg_cpu = (u32*)PHY_TO_UNCACHED_VIRT((u32)mmc_obj->sg_cpu); printf(">>>sg_dma 0x%x,sg_cpu = 0x%x>>>>\n",mmc_obj->sg_dma,mmc_obj->sg_cpu); if (!mmc_obj->sg_cpu) { dev_err(mmc_obj->dev,"%s: could not alloc DMA memory\n",__func__); goto no_dma; } mmc_obj->dma_ops = &dw_mci_idmac_ops; dev_info(mmc_obj->dev, "Using internal DMA controller.\n"); if (mmc_obj->dma_ops->init && mmc_obj->dma_ops->start && mmc_obj->dma_ops->stop && mmc_obj->dma_ops->cleanup) { if (mmc_obj->dma_ops->init(mmc_obj)) { printf("%s: Unable to initialize DMA Controller.\n", __func__); goto no_dma; } } else { printf("DMA initialization not found.\n"); goto no_dma; } return; no_dma: dev_info(mmc_obj->dev, "Using PIO mode.\n"); mmc_obj->use_dma = TRANS_MODE_PIO; } void MMC_Init(struct ark_mmc_obj *mmc_obj) { uint32_t reg; if(mmc_obj->mmc_reset) mmc_obj->mmc_reset(mmc_obj); vClkEnable(mmc_obj->clk_id); MMC_Reset(mmc_obj); dw_mci_init_dma(mmc_obj); MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_ALL); MMC_SetInterruptMask(mmc_obj, 0x0); reg = 
readl(mmc_obj->base + SDMMC_CTRL); reg |= SDMMC_CTRL_INT_ENABLE; writel(reg, mmc_obj->base + SDMMC_CTRL); //set timeout param writel(0xffffffff, mmc_obj->base + SDMMC_TMOUT); //set fifo reg = readl(mmc_obj->base + SDMMC_FIFOTH); reg = ((reg >> 16) & 0xfff) + 1; mmc_obj->fifoth_val = SDMMC_SET_FIFOTH(0x3, reg / 2 - 1, reg / 2); writel(mmc_obj->fifoth_val, mmc_obj->base + SDMMC_FIFOTH); MMC_SetInterruptMask(mmc_obj, SDMMC_INT_CD); } static int ark_mmc_write_pio(struct mmc_driver *mmc_drv) { struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; struct mmcsd_cmd *cmd = mmc_drv->cmd; struct mmcsd_data *data = NULL; uint32_t status; uint32_t len; uint32_t remain, fcnt; uint32_t *buf; int i; int hotpluge_support; if(cmd) data = cmd->data; if(!data) { TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__); return -EIO; } hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id); do { if((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) break; if (data->blks * data->blksize == data->bytes_xfered) break; buf = data->buf + data->bytes_xfered / 4; remain = data->blks * data->blksize - data->bytes_xfered; do { fcnt = (SDMMC_FIFO_DEPTH - MMC_GetWaterlevel(mmc_obj)) * 4; len = configMIN(remain, fcnt); if (!len) break; for (i = 0; i < len / 4; i ++) { writel(*buf++, mmc_obj->base + SDMMC_FIFO); } data->bytes_xfered += len; remain -= len; } while (remain); status = readl(mmc_obj->base + SDMMC_MINTSTS); writel(SDMMC_INT_TXDR, mmc_obj->base + SDMMC_MINTSTS); } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ return 0; } static int ark_mmc_read_pio(struct mmc_driver *mmc_drv, bool dto) { struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; struct mmcsd_cmd *cmd = mmc_drv->cmd; struct mmcsd_data *data = NULL; u32 status; unsigned int len; unsigned int remain, fcnt; uint32_t *buf; int i; int hotpluge_support; if(cmd) data = cmd->data; if(!data) { TRACE_DEBUG("ERROR: %s, data is NULL\n", __func__); return -EIO; } 
hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id); do { if((hotpluge_support == 1) && (readl(mmc_obj->base + SDMMC_CDETECT) & 0x1)) break; if (data->blks * data->blksize == data->bytes_xfered) break; buf = data->buf + data->bytes_xfered / 4; remain = data->blks * data->blksize - data->bytes_xfered; do { fcnt = MMC_GetWaterlevel(mmc_obj) * 4; len = configMIN(remain, fcnt); if (!len) break; for (i = 0; i < len / 4; i ++) { *buf++ = readl(mmc_obj->base + SDMMC_FIFO); } data->bytes_xfered += len; remain -= len; } while (remain); status = readl(mmc_obj->base + SDMMC_MINTSTS); writel(SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS); /* if the RXDR is ready read again */ } while ((status & SDMMC_INT_RXDR) || (dto && MMC_GetWaterlevel(mmc_obj))); return 0; } static void ark_mmc_set_iocfg(struct mmcsd_host *host, struct mmcsd_io_cfg *io_cfg) { uint32_t clksrc, clkdiv; struct mmc_driver *mmc_drv = host->private_data; struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; unsigned int regs; int hotpluge_support = mmcsd_dev_is_support_hotpluge(mmc_obj->id); /* maybe switch power to the card */ switch (io_cfg->power_mode) { case MMCSD_POWER_OFF: if(hotpluge_support == 1) { regs = readl(mmc_obj->base + SDMMC_PWREN); regs &= ~(1 << 0); writel(regs, mmc_obj->base + SDMMC_PWREN); } break; case MMCSD_POWER_UP: if(hotpluge_support == 1) { regs = readl(mmc_obj->base + SDMMC_PWREN); regs &= ~(1 << 0); regs |= (1 << 0); writel(regs, mmc_obj->base + SDMMC_PWREN); } break; case MMCSD_POWER_ON: if(hotpluge_support == 1) { MMC_Reset(mmc_obj); } break; default: printf("ERROR: %s, unknown power_mode %d\n", __func__, io_cfg->power_mode); break; } //fixme: read from PMU //why io_cfg->clock == 0 ? 
if(io_cfg->clock) { clksrc = ulClkGetRate(mmc_obj->clk_id); clkdiv = clksrc / io_cfg->clock / 2; MMC_UpdateClockRegister(mmc_obj, clkdiv); TRACE_DEBUG("io_cfg->clock: %lu, clock in: %lu, clkdiv: %d\n", io_cfg->clock, clkdiv, clkdiv); } else { writel(0, mmc_obj->base + SDMMC_CLKENA); } if (io_cfg->bus_width == MMCSD_BUS_WIDTH_4) { MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_4BIT); TRACE_DEBUG("set to 4-bit mode\n"); } else { MMC_SetCardWidth(mmc_obj, SDMMC_CTYPE_1BIT); // printf("set to 1-bit mode\n"); } TRACE_DEBUG("%s end\n", __func__); } static void ark_mmc_enable_sdio_irq(struct mmcsd_host *host, int32_t enable) { struct mmc_driver *mmc_drv = host->private_data; struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; uint32_t reg; TRACE_DEBUG("%s start\n", __func__); if (enable) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO); reg = MMC_GetInterruptMask(mmc_obj); reg |= SDMMC_INT_SDIO; MMC_SetInterruptMask(mmc_obj, reg); } else { reg = MMC_GetInterruptMask(mmc_obj); reg &= ~SDMMC_INT_SDIO; MMC_SetInterruptMask(mmc_obj, reg); } } static int32_t ark_mmc_get_card_status(struct mmcsd_host *host) { struct mmc_driver *mmc_drv = host->private_data; struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; return !(readl(mmc_obj->base + SDMMC_CDETECT) & 0x1); } static void ark_mmc_send_command(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd) { struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; struct mmcsd_req *req = mmc_drv->req; //fixme: cmd->data or req->data struct mmcsd_data *data = cmd->data; int ret; uint32_t cmd_flags = 0; TRACE_DEBUG("%s, start\n", __func__); if (!cmd) { //fixme: stop dma printf("ERROR: %s, cmd is NULL\n", __func__); return; } if (data) { cmd_flags |= SDMMC_CMD_DAT_EXP; /* always set data start - also set direction flag for read */ if (data->flags & DATA_DIR_WRITE) cmd_flags |= SDMMC_CMD_DAT_WR; if (data->flags & DATA_STREAM) cmd_flags |= SDMMC_CMD_STRM_MODE; } if (cmd == req->stop) cmd_flags |= SDMMC_CMD_STOP; 
else cmd_flags |= SDMMC_CMD_PRV_DAT_WAIT; switch (resp_type(cmd)) { case RESP_NONE: break; case RESP_R1: case RESP_R5: case RESP_R6: case RESP_R7: case RESP_R1B: cmd_flags |= SDMMC_CMD_RESP_EXP; cmd_flags |= SDMMC_CMD_RESP_CRC; break; case RESP_R2: cmd_flags |= SDMMC_CMD_RESP_EXP; cmd_flags |= SDMMC_CMD_RESP_CRC; cmd_flags |= SDMMC_CMD_RESP_LONG; break; case RESP_R3: case RESP_R4: cmd_flags |= SDMMC_CMD_RESP_EXP; break; default: printf("ERROR: %s, unknown cmd type %x\n", __func__, resp_type(cmd)); return; } if (cmd->cmd_code == GO_IDLE_STATE) cmd_flags |= SDMMC_CMD_INIT; /* CMD 11 check switch voltage */ if (cmd->cmd_code == READ_DAT_UNTIL_STOP) cmd_flags |= SDMMC_CMD_VOLT_SWITCH; TRACE_DEBUG("cmd code: %d, args: 0x%x, resp type: 0x%x, flag: 0x%x\n", cmd->cmd_code, cmd->arg, resp_type(cmd), cmd_flags); ret = MMC_SendCommand(mmc_obj, cmd->cmd_code, cmd->arg, cmd_flags); if(ret) { printf("ERROR: %s, Send command timeout, cmd: %d, status: 0x%x\n", __func__, cmd->cmd_code, MMC_GetStatus(mmc_obj)); } } static void dw_mci_adjust_fifoth(struct ark_mmc_obj *mmc_obj, struct mmcsd_data *data) { unsigned int blksz = data->blksize; const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; u32 fifo_width = 4; u32 blksz_depth = blksz / fifo_width, fifoth_val; u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; int idx = ARRAY_SIZE(mszs) - 1; /* pio should ship this scenario */ if (!mmc_obj->use_dma) return; tx_wmark = SDMMC_FIFO_DEPTH / 2; tx_wmark_invers = SDMMC_FIFO_DEPTH - tx_wmark; /* * MSIZE is '1', * if blksz is not a multiple of the FIFO width */ if (blksz % fifo_width) goto done; do { if (!((blksz_depth % mszs[idx]) || (tx_wmark_invers % mszs[idx]))) { msize = idx; rx_wmark = mszs[idx] - 1; break; } } while (--idx > 0); /* * If idx is '0', it won't be tried * Thus, initial values are uesed */ done: fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); writel(fifoth_val, mmc_obj->base + SDMMC_FIFOTH); } static int dw_mci_submit_data_dma(struct ark_mmc_obj *mmc_obj, 
struct mmcsd_data *data) { u32 temp; mmc_obj->using_dma = 0; /* If we don't have a channel, we can't do DMA */ if (!mmc_obj->use_dma) return -1; if (data->blks * data->blksize < DW_MCI_DMA_THRESHOLD || data->blksize & 3 || (u32)data->buf & 3) { // //printf(">>>>Error,blksize 0x%x,dataaddr:0x%x\n",data->blksize,(u32)data->buf); mmc_obj->dma_ops->stop(mmc_obj); return -1; } mmc_obj->using_dma = 1; temp = MMC_GetInterruptMask(mmc_obj); temp |= SDMMC_INT_DATA_OVER | SDMMC_INT_DATA_ERROR; temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); MMC_SetInterruptMask(mmc_obj, temp); /* Enable the DMA interface */ temp = readl(mmc_obj->base + SDMMC_CTRL); temp |= SDMMC_CTRL_DMA_ENABLE; writel(temp, mmc_obj->base + SDMMC_CTRL); /* * Decide the MSIZE and RX/TX Watermark. * If current block size is same with previous size, * no need to update fifoth. */ if (mmc_obj->prev_blksz != data->blksize) dw_mci_adjust_fifoth(mmc_obj, data); if (mmc_obj->dma_ops->start(mmc_obj, data)) { mmc_obj->dma_ops->stop(mmc_obj); /* We can't do DMA, try PIO for this one */ dev_dbg(mmc_obj->dev, "%s: fall back to PIO mode for current transfer\n", __func__); mmc_obj->using_dma = 0; return -1; } return 0; } static void ark_mmc_perpare_data(struct mmc_driver *mmc_drv) { struct mmcsd_cmd *cmd = mmc_drv->cmd; struct mmcsd_data *data = cmd->data; struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; uint32_t data_size; uint32_t reg; if(!data) { MMC_SetBlockSize(mmc_obj, 0); MMC_SetByteCount(mmc_obj, 0); return; } TRACE_DEBUG("%s, start\n", __func__); if(MMC_ResetFifo(mmc_obj)) { return; } data_size = data->blks * data->blksize; MMC_SetBlockSize(mmc_obj, data->blksize); data->bytes_xfered = 0; if(data_size % 4) { printf("ERROR: data_size should be a multiple of 4, but now is %d\n", data_size); } MMC_SetByteCount(mmc_obj, data_size); TRACE_DEBUG("%s, set blk size: 0x%x, byte count: 0x%x\n", __func__, data->blksize, data_size); if (dw_mci_submit_data_dma(mmc_obj, data)) { writel(SDMMC_INT_TXDR | 
SDMMC_INT_RXDR, mmc_obj->base + SDMMC_RINTSTS); reg = readl(mmc_obj->base + SDMMC_INTMASK); reg |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; writel(reg, mmc_obj->base + SDMMC_INTMASK); reg = readl(mmc_obj->base + SDMMC_CTRL); reg &= ~SDMMC_CTRL_DMA_ENABLE; writel(reg, mmc_obj->base + SDMMC_CTRL); } else { mmc_obj->prev_blksz = data->blksize; } TRACE_DEBUG("%s, end\n", __func__); } int ark_mmc_wait_card_idle(struct ark_mmc_obj *mmc_obj) { uint32_t tick, timeout; tick = xTaskGetTickCount(); timeout = tick + configTICK_RATE_HZ / 2; //500ms while(MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY) { tick = xTaskGetTickCount(); if(tick > timeout) { return -1; } } return 0; } static int ark_mmc_get_response(struct mmc_driver *mmc_drv, struct mmcsd_cmd *cmd) { int i; uint32_t tick, timeout, status; struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; cmd->resp[0] = 0; cmd->resp[1] = 0; cmd->resp[2] = 0; cmd->resp[3] = 0; tick = xTaskGetTickCount(); timeout = tick + configTICK_RATE_HZ / 2; //500ms //fixme: spin_lock_irqsave? 
do { status = MMC_GetRawInterrupt(mmc_obj); tick = xTaskGetTickCount(); if(tick > timeout) { TRACE_DEBUG("ERROR: %s, get response timeout(cmd is not received by card), RINTSTS: 0x%x, cmd: %d\n", __func__, status, cmd->cmd_code); return -1; } } while(!(status & SDMMC_INT_CMD_DONE)); MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE); for (i = 0; i < 4; i++) { if (resp_type(cmd) == RESP_R2) { cmd->resp[i] = MMC_GetResponse(mmc_obj, 3 - i); //fixme : R2 must delay some time here ,when use UHI card, need check why //1ms //vTaskDelay(configTICK_RATE_HZ / 100); } else { cmd->resp[i] = MMC_GetResponse(mmc_obj, i); } } TRACE_DEBUG("resp: 0x%x, 0x%x, 0x%x, 0x%x\n", cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); if (status & SDMMC_INT_RTO) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RTO); TRACE_DEBUG("ERROR: %s, get response timeout, RINTSTS: 0x%x\n", __func__, status); return -1; } else if (status & (SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR)) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RCRC | SDMMC_INT_RESP_ERR); printf("ERROR: %s, response error or response crc error, RINTSTS: 0x%x\n", __func__, status); return -1; } return 0; } static int ark_mmc_start_transfer(struct mmc_driver *mmc_drv) { struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; struct mmcsd_cmd *cmd = mmc_drv->cmd; struct mmcsd_data *data = NULL; int ret; uint32_t interrupt, status, reg; uint32_t timeout; if(cmd) data = cmd->data; if(!data) { return 0; } TRACE_DEBUG("%s, start\n", __func__); //fixme: spin_lock_irqsave(&host->lock, flags); if (!mmc_obj->using_dma) { //fifo mode open data interrupts reg = MMC_GetInterruptMask(mmc_obj); reg |= SDMMC_INT_STATUS_DATA; MMC_SetInterruptMask(mmc_obj, reg); } //fixme: spin_unlock_irqrestore(&host->lock, flags); timeout = configTICK_RATE_HZ + pdMS_TO_TICKS(data->blks * data->blksize * 100/ 1024); // Minimum 10KB per second ret = xQueueReceive(mmc_obj->transfer_completion, NULL, timeout); if (mmc_obj->using_dma) { if (mmc_obj->dummy_buffer_used) { if 
(data->flags & DATA_DIR_WRITE) { if (mmc_obj->tx_dummy_buffer) { vPortFree(mmc_obj->tx_dummy_buffer); mmc_obj->tx_dummy_buffer = NULL; } } else if (data->flags & DATA_DIR_READ) { if (mmc_obj->rx_dummy_buffer) { CP15_invalidate_dcache_for_dma((u32)mmc_obj->rx_dummy_buffer, (u32)mmc_obj->rx_dummy_buffer + data->blks * data->blksize); memcpy(data->buf, mmc_obj->rx_dummy_buffer, data->blks * data->blksize); vPortFree(mmc_obj->rx_dummy_buffer); mmc_obj->rx_dummy_buffer = NULL; } } } else { if (data->flags & DATA_DIR_READ) CP15_invalidate_dcache_for_dma((u32)data->buf, (u32)data->buf + data->blks * data->blksize); } } else { reg = MMC_GetInterruptMask(mmc_obj); reg &= ~SDMMC_INT_STATUS_DATA; MMC_SetInterruptMask(mmc_obj, reg); } if(ret != pdTRUE || mmc_obj->result) { //fixme: error handle if (mmc_obj->using_dma) dw_mci_stop_dma(mmc_obj); cmd->err = ret; interrupt = MMC_GetRawInterrupt(mmc_obj); status = MMC_GetStatus(mmc_obj); printf("ERROR: %s, transfer timeout, ret: %d, RINTSTS: 0x%x, STATUS: 0x%x\n", __func__, ret, interrupt, status); return -1; } data->bytes_xfered = data->blks * data->blksize; return 0; } static void ark_mmc_complete_request(struct mmc_driver *mmc_drv) { struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; mmc_drv->cmd = NULL; mmc_drv->req = NULL; mmc_drv->data = NULL; MMC_SetBlockSize(mmc_obj, 0); MMC_SetByteCount(mmc_obj, 0); mmcsd_req_complete(mmc_drv->host); } static void ark_mmc_request(struct mmcsd_host *host, struct mmcsd_req *req) { int ret; struct mmc_driver *mmc_drv = host->private_data; struct mmcsd_cmd *cmd = req->cmd; struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; TRACE_DEBUG("%s start\n", __func__); mmc_drv->req = req; mmc_drv->cmd = cmd; if (mmc_obj->transfer_completion == NULL) mmc_obj->transfer_completion = xQueueCreate(1, 0); else xQueueReset(mmc_obj->transfer_completion); ret = ark_mmc_wait_card_idle(mmc_obj); if (ret) { printf("ERROR: %s, data transfer timeout, status: 0x%x\r\n", __func__, 
MMC_GetStatus(mmc_obj)); if (MMC_GetStatus(mmc_obj) & SDMMC_STATUS_BUSY) goto out; } mmc_obj->result = 0; ark_mmc_perpare_data(mmc_drv); ark_mmc_send_command(mmc_drv, cmd); if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) vTaskDelay(pdMS_TO_TICKS(1)); ret = ark_mmc_get_response(mmc_drv, cmd); if(ret) { cmd->err = ret; printf("%s,get response returns %d, cmd: %d\r\n", __func__, ret, cmd->cmd_code); goto out; } ark_mmc_start_transfer(mmc_drv); if(req->stop) { /* send stop command */ TRACE_DEBUG("%s send stop\n", __func__); ark_mmc_send_command(mmc_drv, req->stop); } out: ark_mmc_complete_request(mmc_drv); TRACE_DEBUG("%s end\n", __func__); } static const struct mmcsd_host_ops ark_mmc_ops = { .request = ark_mmc_request, .set_iocfg = ark_mmc_set_iocfg, .enable_sdio_irq = ark_mmc_enable_sdio_irq, .get_card_status = ark_mmc_get_card_status, }; static void ark_mmc_interrupt(void *param) { struct mmc_driver *mmc_drv = (struct mmc_driver *)param; struct ark_mmc_obj *mmc_obj = (struct ark_mmc_obj *)mmc_drv->priv; struct mmcsd_cmd *cmd = mmc_drv->cmd; struct mmcsd_data *data = NULL; uint32_t status; if (cmd && cmd->data) { data = cmd->data; } status = MMC_GetUnmaskedInterrupt(mmc_obj); TRACE_DEBUG("unmasked interrupts: 0x%x\n", status); if (status & SDMMC_CMD_ERROR_FLAGS) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_CMD_ERROR_FLAGS); mmc_obj->result = -1; xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0); } if (status & SDMMC_DATA_ERROR_FLAGS) { /* if there is an error report DATA_ERROR */ MMC_ClearRawInterrupt(mmc_obj, SDMMC_DATA_ERROR_FLAGS); mmc_obj->result = -1; xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0); } if (status & SDMMC_INT_DATA_OVER) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_DATA_OVER); if (data && data->flags & DATA_DIR_READ) { if (!mmc_obj->using_dma && data->bytes_xfered != data->blks * data->blksize) ark_mmc_read_pio(mmc_drv, 1); } if (!mmc_obj->using_dma) xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0); } if (status & 
SDMMC_INT_RXDR) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR); if (data && data->flags & DATA_DIR_READ) ark_mmc_read_pio(mmc_drv, 0); MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_RXDR); MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_HTO); } if (status & SDMMC_INT_TXDR) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_TXDR); if (data && data->flags & DATA_DIR_WRITE) ark_mmc_write_pio(mmc_drv); } if (status & SDMMC_INT_CMD_DONE) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CMD_DONE); } if (status & SDMMC_INT_CD) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_CD); mmcsd_change_from_isr(mmc_drv->host); } if (status & SDMMC_INT_SDIO) { MMC_ClearRawInterrupt(mmc_obj, SDMMC_INT_SDIO); sdio_irq_wakeup_isr(mmc_drv->host); } if (mmc_obj->use_dma == TRANS_MODE_IDMAC){ /* Handle IDMA interrupts */ if (mmc_obj->dma_64bit_address == 1) { status = readl(mmc_obj->base + SDMMC_IDSTS64); if (MMC_IsFifoEmpty(mmc_obj) && (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))) { writel(SDMMC_IDMAC_INT_TI|SDMMC_IDMAC_INT_RI, mmc_obj->base + SDMMC_IDSTS64); writel(SDMMC_IDMAC_INT_NI,mmc_obj->base + SDMMC_IDSTS64); // if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) // host->dma_ops->complete((void *)host); } } else { status = readl(mmc_obj->base+SDMMC_IDSTS); if (MMC_IsFifoEmpty(mmc_obj) && (status & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI))){ writel(SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI,mmc_obj->base+SDMMC_IDSTS); writel(SDMMC_IDMAC_INT_NI,mmc_obj->base+SDMMC_IDSTS); xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0); // if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) // mmc_obj->dma_ops->complete((void *)host); } } } // xQueueSendFromISR(mmc_obj->transfer_completion, NULL, 0); } void ark_mmc_reset(struct ark_mmc_obj *mmc_obj) { sys_soft_reset(mmc_obj->softreset_id); } static struct ark_mmc_obj mmc0_obj = { .id = 0, .irq = SDMMC0_IRQn, .base = REGS_SDMMC0_BASE, .mmc_reset = ark_mmc_reset, .softreset_id = softreset_sdmmc, .clk_id = CLK_SDMMC0, }; static struct ark_mmc_obj mmc1_obj = { 
.id = 1, .irq = SDMMC1_IRQn, .base = REGS_SDMMC1_BASE, .mmc_reset = ark_mmc_reset, .softreset_id = softreset_sdmmc1, .clk_id = CLK_SDMMC1, }; int ark_mmc_probe(struct ark_mmc_obj *mmc_obj) { struct mmc_driver *mmc_drv; struct mmcsd_host *host; TRACE_DEBUG("%s start\n", __func__); mmc_drv = (struct mmc_driver*)pvPortMalloc(sizeof(struct mmc_driver)); memset(mmc_drv, 0, sizeof(struct mmc_driver)); mmc_drv->priv = mmc_obj; host = mmcsd_alloc_host(); if (!host) { printf("ERROR: %s, failed to malloc host\n", __func__); return -ENOMEM; } host->ops = &ark_mmc_ops; host->freq_min = MMC_FEQ_MIN; host->freq_max = MMC_FEQ_MAX; host->valid_ocr = VDD_32_33 | VDD_33_34; host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_BUSWIDTH_4; host->max_blk_size = 512; //fixme: max_blk_count? host->max_blk_count = 2048; host->private_data = mmc_drv; mmc_drv->host = host; MMC_Init(mmc_obj); if (mmc_obj->use_dma == TRANS_MODE_IDMAC) { host->max_segs = DESC_RING_BUF_SZ / sizeof(struct idmac_desc);//host->ring_size; host->max_blk_size = 65535; host->max_seg_size = 0x1000; host->max_req_size = host->max_seg_size * host->max_segs; host->max_blk_count = host->max_req_size / 512; } request_irq(mmc_obj->irq, 0, ark_mmc_interrupt, mmc_drv); if (mmcsd_dev_is_sdio_card(mmc_obj->id) == 1) { ark_mmc_enable_sdio_irq(host, 1); } if (mmcsd_dev_is_support_hotpluge(mmc_obj->id) == 1) { if (ark_mmc_get_card_status(host)) mmcsd_change(host); } else { mmcsd_change(host); } TRACE_DEBUG("%s end\n", __func__); return 0; } int mmc_init(void) { #ifdef SDMMC0_SUPPORT sema_take(SEMA_GATE_SDMMC0, portMAX_DELAY); ark_mmc_probe(&mmc0_obj); sema_give(SEMA_GATE_SDMMC0); #endif #ifdef SDMMC1_SUPPORT ark_mmc_probe(&mmc1_obj); #endif return 0; } #endif