/*
 * dwspi.c — Synopsys DesignWare SPI/QSPI master controller driver
 * (FreeRTOS port, PIO/interrupt/DMA receive paths).
 */
  1. #include "FreeRTOS.h"
  2. #include "chip.h"
  3. #include "board.h"
  4. #include "pinctrl.h"
  5. #include <string.h>
/* Board-level GPIO numbers used for the JEDEC reset sequence and CS0 */
#define SPI0_CS0_GPIO 32
#define SPI0_IO0_GPIO 34
/* Register offsets */
#define DW_SPI_CTRL0 0x00
#define DW_SPI_CTRL1 0x04
#define DW_SPI_SSIENR 0x08
#define DW_SPI_MWCR 0x0c
#define DW_SPI_SER 0x10
#define DW_SPI_BAUDR 0x14
#define DW_SPI_TXFLTR 0x18
#define DW_SPI_RXFLTR 0x1c
#define DW_SPI_TXFLR 0x20
#define DW_SPI_RXFLR 0x24
#define DW_SPI_SR 0x28
#define DW_SPI_IMR 0x2c
#define DW_SPI_ISR 0x30
#define DW_SPI_RISR 0x34
#define DW_SPI_TXOICR 0x38
#define DW_SPI_RXOICR 0x3c
#define DW_SPI_RXUICR 0x40
#define DW_SPI_MSTICR 0x44
#define DW_SPI_ICR 0x48
#define DW_SPI_DMACR 0x4c
#define DW_SPI_DMATDLR 0x50
#define DW_SPI_DMARDLR 0x54
#define DW_SPI_IDR 0x58
#define DW_SPI_VERSION 0x5c
#define DW_SPI_DR 0x60
#define DW_SPI_QSPI_CTRL0 0xf4
/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 0
#define SPI_FRF_OFFSET 4
#define SPI_FRF_SPI 0x0
#define SPI_FRF_SSP 0x1
#define SPI_FRF_MICROWIRE 0x2
#define SPI_FRF_RESV 0x3
#define SPI_MODE_OFFSET 6
#define SPI_SCPH_OFFSET 6
#define SPI_SCOL_OFFSET 7
#define SPI_TMOD_OFFSET 8
#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
#define SPI_SLVOE_OFFSET 10
#define SPI_SRL_OFFSET 11
#define SPI_CFS_OFFSET 12
#define SPI_DFS32_OFFSET 16
#define SPI_DAF_OFFSET 21
/* Data-access frame (number of data lines) encodings used in CTRLR0 */
#define SPI_DAF_STANDARD 0
#define SPI_DAF_DUAL 1
#define SPI_DAF_QUAD 2
/* Bit fields in QSPI_CTRLR0 */
#define SPI_TRANS_TYPE_OFFSET 0
#define SPI_ADDR_LENGTH_OFFSET 2
#define SPI_INST_LENGTH_OFFSET 8
#define SPI_WAIT_CYCLES_OFFSET 11
/* Bit fields in SR, 7 bits */
#define SR_MASK 0x7f /* cover 7 bits */
#define SR_BUSY (1 << 0)
#define SR_TF_NOT_FULL (1 << 1)
#define SR_TF_EMPT (1 << 2)
#define SR_RF_NOT_EMPT (1 << 3)
#define SR_RF_FULL (1 << 4)
#define SR_TX_ERR (1 << 5)
#define SR_DCOL (1 << 6)
/* Bit fields in ISR, IMR, RISR, 7 bits */
#define SPI_INT_TXEI (1 << 0)
#define SPI_INT_TXOI (1 << 1)
#define SPI_INT_RXUI (1 << 2)
#define SPI_INT_RXOI (1 << 3)
#define SPI_INT_RXFI (1 << 4)
#define SPI_INT_MSTI (1 << 5)
/* Bit fields in DMACR */
#define SPI_DMA_RDMAE (1 << 0)
#define SPI_DMA_TDMAE (1 << 1)
/* TX RX interrupt level threshold, max can be 256 */
#define SPI_INT_THRESHOLD 32
/* Frame format selector written into CTRLR0[FRF] */
enum dw_ssi_type {
	SSI_MOTO_SPI = 0,
	SSI_TI_SSP,
	SSI_NS_MICROWIRE,
};
struct dw_spi;
/* Optional DMA backend; only dma_init/dma_setup/dma_transfer are
 * populated by this file (see dw_dma_ops below). */
struct dw_spi_dma_ops {
	int (*dma_init)(struct dw_spi *dws);
	void (*dma_exit)(struct dw_spi *dws);
	int (*dma_setup)(struct dw_spi *dws, struct spi_message *message);
	bool (*can_dma)(struct dw_spi *dws, struct spi_message *message);
	int (*dma_transfer)(struct dw_spi *dws, struct spi_message *message);
	void (*dma_stop)(struct dw_spi *dws);
};
/* Slave spi_dev related */
struct chip_data {
	u8 cs; /* chip select pin */
	u8 tmode; /* TR/TO/RO/EEPROM */
	u8 type; /* SPI/SSP/MicroWire */
	u8 poll_mode; /* 1 means use poll mode */
	u8 enable_dma;
	u16 clk_div; /* baud rate divider */
	u16 qspi_clk_div; /* divider used for the QSPI read path */
	u32 speed_hz; /* baud rate */
	void (*cs_control)(u32 command);
};
/* Controller state. NOTE: `slave` must stay the first member — the
 * transfer entry points cast struct spi_slave * to struct dw_spi *. */
struct dw_spi {
	struct spi_slave slave;
	QueueHandle_t xfer_done; /* length-1, zero-size-item completion queue */
	enum dw_ssi_type type;
	void __iomem *regs;
	struct clk *clk;
	unsigned long paddr;
	int irq;
	u32 fifo_len; /* depth of the FIFO buffer */
	u32 max_freq; /* max bus freq supported */
	u16 bus_num;
	u16 num_cs; /* supported slave numbers */
	u32 cs_gpio; /* CS is driven as a GPIO, not by the controller */
	/* Current message transfer state info */
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	u32 rxlevel; /* RX FIFO / DMA watermark for the current transfer */
	int dma_mapped;
	char *rx_dummy_buffer; /* bounce buffer when rx is not 32-byte aligned */
	u8 n_bytes; /* current is a 1/2 bytes op */
	u32 dma_width;
	void (*transfer_handler)(struct dw_spi *dws); /* dispatched from dw_spi_irq */
	u32 current_freq; /* frequency in hz */
	u32 current_qspi_freq;
	int xfer_ret; /* set to 1 by the DMA error path, checked after wakeup */
	/* DMA info */
	int dma_inited;
	struct dma_chan *txchan;
	struct dma_chan *rxchan;
	unsigned long dma_chan_busy;
	dma_addr_t dma_addr; /* phy address of the Data register */
	const struct dw_spi_dma_ops *dma_ops;
	void *dma_tx;
	void *dma_rx;
	/* Bus interface info */
	void *priv;
	struct chip_data *chip;
};
/*
 * Each SPI slave device to work with dw_api controller should
 * has such a structure claiming its working mode (poll or PIO/DMA),
 * which can be save in the "controller_data" member of the
 * struct spi_device.
 */
struct dw_spi_chip {
	u8 poll_mode; /* 1 for controller polling mode */
	u8 type; /* SPI/SSP/MicroWire */
	void (*cs_control)(u32 command);
};
  163. static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
  164. {
  165. return readl((u32)dws->regs + offset);
  166. }
  167. static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
  168. {
  169. writel(val, (u32)dws->regs + offset);
  170. }
  171. static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
  172. {
  173. return dw_readl(dws, offset);
  174. }
  175. static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
  176. {
  177. dw_writel(dws, offset, val);
  178. }
  179. static inline void spi_enable_chip(struct dw_spi *dws, int enable)
  180. {
  181. dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
  182. }
  183. static inline void spi_set_clk(struct dw_spi *dws, u16 div)
  184. {
  185. dw_writel(dws, DW_SPI_BAUDR, div);
  186. }
  187. /* Disable IRQ bits */
  188. static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
  189. {
  190. u32 new_mask;
  191. new_mask = dw_readl(dws, DW_SPI_IMR) & ~mask;
  192. dw_writel(dws, DW_SPI_IMR, new_mask);
  193. }
  194. /* Enable IRQ bits */
  195. static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
  196. {
  197. u32 new_mask;
  198. new_mask = dw_readl(dws, DW_SPI_IMR) | mask;
  199. dw_writel(dws, DW_SPI_IMR, new_mask);
  200. }
  201. /*
  202. * This does disable the SPI controller, interrupts, and re-enable the
  203. * controller back. Transmit and receive FIFO buffers are cleared when the
  204. * device is disabled.
  205. */
  206. static inline void spi_reset_chip(struct dw_spi *dws)
  207. {
  208. spi_enable_chip(dws, 0);
  209. spi_mask_intr(dws, 0xff);
  210. spi_enable_chip(dws, 1);
  211. }
  212. /* static inline void spi_shutdown_chip(struct dw_spi *dws)
  213. {
  214. spi_enable_chip(dws, 0);
  215. spi_set_clk(dws, 0);
  216. } */
  217. /* Return the max entries we can fill into tx fifo */
  218. static inline u32 tx_max(struct dw_spi *dws)
  219. {
  220. u32 tx_left, tx_room, rxtx_gap;
  221. tx_left = ((u32)dws->tx_end - (u32)dws->tx) / dws->n_bytes;
  222. tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
  223. /*
  224. * Another concern is about the tx/rx mismatch, we
  225. * though to use (dws->fifo_len - rxflr - txflr) as
  226. * one maximum value for tx, but it doesn't cover the
  227. * data which is out of tx/rx fifo and inside the
  228. * shift registers. So a control from sw point of
  229. * view is taken.
  230. */
  231. rxtx_gap = (((u32)dws->rx_end - (u32)dws->rx) - ((u32)dws->tx_end - (u32)dws->tx))
  232. / dws->n_bytes;
  233. return configMIN(tx_left, configMIN(tx_room, (u32) (dws->fifo_len - rxtx_gap)));
  234. }
  235. /* Return the max entries we should read out of rx fifo */
  236. static inline u32 rx_max(struct dw_spi *dws)
  237. {
  238. u32 rx_left = ((u32)dws->rx_end - (u32)dws->rx) / dws->n_bytes;
  239. return configMIN(rx_left, dw_readl(dws, DW_SPI_RXFLR));
  240. }
  241. static void dw_writer(struct dw_spi *dws)
  242. {
  243. u32 max = tx_max(dws);
  244. u32 txw = 0;
  245. while (max--) {
  246. /* Set the tx word if the transfer's original "tx" is not null */
  247. if ((u32)dws->tx_end - dws->len) {
  248. if (dws->n_bytes == 1)
  249. txw = *(u8 *)(dws->tx);
  250. else if (dws->n_bytes == 2)
  251. txw = *(u16 *)(dws->tx);
  252. else
  253. txw = *(u32 *)(dws->tx);
  254. }
  255. dw_write_io_reg(dws, DW_SPI_DR, txw);
  256. dws->tx = (u8*)dws->tx + dws->n_bytes;
  257. }
  258. }
  259. static void dw_reader(struct dw_spi *dws)
  260. {
  261. u32 max = rx_max(dws);
  262. u32 rxw;
  263. while (max--) {
  264. rxw = dw_read_io_reg(dws, DW_SPI_DR);
  265. /* Care rx only if the transfer's original "rx" is not null */
  266. if ((u32)dws->rx_end - dws->len) {
  267. if (dws->n_bytes == 1)
  268. *(u8 *)(dws->rx) = rxw;
  269. else if (dws->n_bytes == 2)
  270. *(u16 *)(dws->rx) = rxw;
  271. else
  272. *(u32 *)(dws->rx) = rxw;
  273. }
  274. dws->rx = (u8*)dws->rx + dws->n_bytes;
  275. }
  276. }
  277. static void int_error_stop(struct dw_spi *dws, const char *msg)
  278. {
  279. spi_reset_chip(dws);
  280. dev_err(&dws->master->dev, "%s\n", msg);
  281. }
  282. static void interrupt_transfer(struct dw_spi *dws)
  283. {
  284. u16 irq_status = dw_readl(dws, DW_SPI_ISR);
  285. /* Error handling */
  286. if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
  287. dw_readl(dws, DW_SPI_ICR);
  288. int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
  289. xQueueSendFromISR(dws->xfer_done, NULL, 0);
  290. return;
  291. }
  292. dw_reader(dws);
  293. if (dws->rx_end == dws->rx) {
  294. spi_mask_intr(dws, SPI_INT_TXEI);
  295. xQueueSendFromISR(dws->xfer_done, NULL, 0);
  296. return;
  297. }
  298. if (irq_status & SPI_INT_TXEI) {
  299. spi_mask_intr(dws, SPI_INT_TXEI);
  300. dw_writer(dws);
  301. /* Enable TX irq always, it will be disabled when RX finished */
  302. spi_umask_intr(dws, SPI_INT_TXEI);
  303. }
  304. return;
  305. }
  306. static void qspi_read_interrupt(struct dw_spi *dws)
  307. {
  308. u16 irq_status = dw_readl(dws, DW_SPI_ISR);
  309. //u32 rxw;
  310. //int i;
  311. /* Error handling */
  312. if (irq_status & (SPI_INT_RXOI | SPI_INT_RXUI)) {
  313. dw_readl(dws, DW_SPI_ICR);
  314. int_error_stop(dws, "qspi_read_interrupt: fifo overrun/underrun");
  315. xQueueSendFromISR(dws->xfer_done, NULL, 0);
  316. return;
  317. }
  318. if (irq_status & SPI_INT_RXFI) {
  319. /*for (i = 0; i < dws->rxlevel; i++) {
  320. rxw = dw_read_io_reg(dws, DW_SPI_DR);
  321. if (dws->n_bytes == 1)
  322. *(u8 *)(dws->rx) = rxw;
  323. else if (dws->n_bytes == 2)
  324. *(u16 *)(dws->rx) = rxw;
  325. else
  326. *(u32 *)(dws->rx) = rxw;
  327. dws->rx = (u8*)dws->rx + dws->n_bytes;
  328. }*/
  329. dw_reader(dws);
  330. }
  331. if (dws->rx_end == dws->rx) {
  332. xQueueSendFromISR(dws->xfer_done, NULL, 0);
  333. return;
  334. }
  335. return;
  336. }
  337. static void dma_transfer(struct dw_spi *dws)
  338. {
  339. u16 irq_status = dw_readl(dws, DW_SPI_ISR);
  340. printf("status=0x%x.\n", irq_status);
  341. if (!irq_status)
  342. return;
  343. dw_readl(dws, DW_SPI_ICR);
  344. int_error_stop(dws, "dma_transfer: fifo overrun/underrun");
  345. dws->xfer_ret = 1;
  346. xQueueSendFromISR(dws->xfer_done, NULL, 0);
  347. return;
  348. }
  349. static void dw_spi_irq(void *param)
  350. {
  351. struct dw_spi *dws = param;
  352. u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;
  353. if (!irq_status)
  354. return;
  355. dws->transfer_handler(dws);
  356. }
  357. /* Must be called inside pump_transfers() */
  358. static int poll_transfer(struct dw_spi *dws)
  359. {
  360. do {
  361. dw_writer(dws);
  362. dw_reader(dws);
  363. taskYIELD();
  364. } while (dws->rx_end > dws->rx);
  365. return 0;
  366. }
  367. static void dw_spi_chipselect(struct dw_spi *dws, int is_active)
  368. {
  369. int dev_is_lowactive = !(dws->slave.mode & SPI_CS_HIGH);
  370. if (dws->slave.mode & SPI_NO_CS)
  371. return;
  372. gpio_direction_output(dws->cs_gpio, is_active ^ dev_is_lowactive);
  373. }
  374. static int dw_spi_calculate_timeout(struct dw_spi *dws, int size)
  375. {
  376. unsigned long timeout = 0;
  377. /* Time with actual data transfer and CS change delay related to HW */
  378. timeout = (8 + 4) * size / dws->current_freq;
  379. /* Add extra second for scheduler related activities */
  380. timeout += 1;
  381. /* Double calculated timeout */
  382. return pdMS_TO_TICKS(2 * timeout * MSEC_PER_SEC);
  383. }
/*
 * Execute one full-duplex SPI message (PIO, interrupt, or — when
 * dma_mapped is set — DMA; note dma_mapped is forced to 0 below, so the
 * DMA branches are currently dead here). Word size is chosen from the
 * message length and buffer alignment. Returns 0 on success or a
 * negative errno.
 */
static int dw_spi_transfer_one(struct spi_slave *slave, struct spi_message *message)
{
	/* Safe because struct dw_spi embeds `slave` as its first member */
	struct dw_spi *dws = (struct dw_spi *)slave;
	struct chip_data *chip = dws->chip;
	u8 imask = 0;
	u16 txlevel = 0;
	u32 cr0;
	u32 bits_per_word = 0;
	unsigned long transfer_timeout;
	int ret = 0;

	chip->tmode = SPI_TMOD_TR; /* full duplex: transmit and receive */
	dws->dma_mapped = 0;
	/* Snapshot the transfer window; dw_writer/dw_reader advance tx/rx */
	dws->tx = (void *)message->send_buf;
	dws->tx_end = (u8*)dws->tx + message->length;
	dws->rx = message->recv_buf;
	dws->rx_end = (u8*)dws->rx + message->length;
	dws->len = message->length;
	/* Controller must be disabled while CTRLR0/BAUDR are reprogrammed */
	spi_enable_chip(dws, 0);
	spi_set_clk(dws, chip->clk_div);
	if (message->cs_take)
		dw_spi_chipselect(dws, 1);
	/* Pick the widest word size the length and buffer alignment allow */
	if (message->length & 1 || (u32)dws->tx & 1 || (u32)dws->rx & 1)
		bits_per_word = 8;
	else if (message->length & 3 || (u32)dws->tx & 3 || (u32)dws->rx & 3)
		bits_per_word = 16;
	else
		bits_per_word = 32;
	if (bits_per_word == 8) {
		dws->n_bytes = 1;
		dws->dma_width = DMA_BUSWIDTH_1_BYTE;
	} else if (bits_per_word == 16) {
		dws->n_bytes = 2;
		dws->dma_width = DMA_BUSWIDTH_2_BYTES;
	} else if (bits_per_word == 32) {
		dws->n_bytes = 4;
		dws->dma_width = DMA_BUSWIDTH_4_BYTES;
	} else {
		ret = -EINVAL;
		goto end;
	}
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = ((bits_per_word - 1) << SPI_DFS32_OFFSET)
	| (chip->type << SPI_FRF_OFFSET)
	| ((dws->slave.mode & 3) << SPI_MODE_OFFSET)
	| (chip->tmode << SPI_TMOD_OFFSET);
	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (chip->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;
		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}
	dw_writel(dws, DW_SPI_CTRL0, cr0);
	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);
	/*
	 * Interrupt mode
	 * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, message);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			goto end;
		}
	} else if (!chip->poll_mode) {
		dw_writel(dws, DW_SPI_DMACR, 0);
		/* TX watermark: half the FIFO, or the whole message if smaller */
		txlevel = configMIN(dws->fifo_len / 2, dws->len / dws->n_bytes);
		dw_writel(dws, DW_SPI_TXFLTR, txlevel);
		/* Set the interrupt mask */
		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
		SPI_INT_RXUI | SPI_INT_RXOI;
		spi_umask_intr(dws, imask);
		dws->transfer_handler = interrupt_transfer;
	}
	/* Drop any stale completion before starting this transfer */
	xQueueReset(dws->xfer_done);
	dw_writel(dws, DW_SPI_SER, BIT(dws->slave.cs));
	spi_enable_chip(dws, 1); /* enabling starts the transfer */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_transfer(dws, message);
		if (ret < 0)
			goto end;
	}
	if (chip->poll_mode) {
		ret = poll_transfer(dws);
		goto end;
	}
	/* Interrupt mode: block until the ISR signals completion */
	transfer_timeout = dw_spi_calculate_timeout(dws, message->length);
	if (xQueueReceive(dws->xfer_done, NULL, transfer_timeout) != pdTRUE) {
		int_error_stop(dws, "transfer timeout");
		ret = -ETIMEDOUT;
		goto end;
	}
	end:
	if (message->cs_release)
		dw_spi_chipselect(dws, 0);
	return ret;
}
/*
 * QSPI (dual/quad data line) read, receive-only mode, via DMA
 * (dma_mapped is forced to 1 below, so the interrupt branch is
 * currently dead). The controller's NDF counter is 16-bit, so reads
 * longer than 0x10000 frames are split into chunks and the flow loops
 * back to xfer_continue. Returns 0 on success or a negative errno.
 */
static int dw_qspi_read(struct spi_slave *slave, struct qspi_message *qspi_message)
{
	/* Safe because struct dw_spi embeds `slave` as its first member */
	struct dw_spi *dws = (struct dw_spi *)slave;
	struct chip_data *chip = dws->chip;
	struct spi_message *message = (struct spi_message *)&qspi_message->message;
	u8 imask = 0;
	u16 rxlevel = 0;
	u32 cr0, qspi_cr0, ndf;
	u32 bits_per_word = 0;
	u32 addr;
	unsigned long transfer_timeout;
	u32 xfer_len = 0; /* bytes completed so far across chunks */
	int ret = 0;

	chip->tmode = SPI_TMOD_RO; /* receive only */
	/* Word size from the total length (buffers go through DMA here) */
	if (message->length & 1)
		bits_per_word = 8;
	else if (message->length & 3)
		bits_per_word = 16;
	else
		bits_per_word = 32;
	if (bits_per_word == 8) {
		dws->n_bytes = 1;
		dws->dma_width = DMA_BUSWIDTH_1_BYTE;
	} else if (bits_per_word == 16) {
		dws->n_bytes = 2;
		dws->dma_width = DMA_BUSWIDTH_2_BYTES;
	} else if (bits_per_word == 32) {
		dws->n_bytes = 4;
		dws->dma_width = DMA_BUSWIDTH_4_BYTES;
	} else {
		ret = -EINVAL;
		goto end;
	}
	xfer_continue:
	/* NDF counts frames minus one; hardware field is 16 bits wide */
	ndf = (message->length - xfer_len) / dws->n_bytes - 1;
	if (ndf > 0xffff) ndf = 0xffff;
	dws->dma_mapped = 1;
	dws->xfer_ret = 0;
	dws->rx = (u8*)message->recv_buf + xfer_len;
	dws->len = (ndf + 1) * dws->n_bytes; /* bytes in this chunk */
	dws->rx_end = (u8*)dws->rx + dws->len;
	/* Controller must be disabled while the CTRL registers change */
	spi_enable_chip(dws, 0);
	spi_set_clk(dws, chip->qspi_clk_div);
	if (message->cs_take)
		dw_spi_chipselect(dws, 1);
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = ((bits_per_word - 1) << SPI_DFS32_OFFSET)
	| (chip->type << SPI_FRF_OFFSET)
	| ((dws->slave.mode & 3) << SPI_MODE_OFFSET)
	| (chip->tmode << SPI_TMOD_OFFSET);
	/* Select dual/quad data-line frame format */
	if (qspi_message->qspi_data_lines == 4)
		cr0 |= SPI_DAF_QUAD << SPI_DAF_OFFSET;
	else if (qspi_message->qspi_data_lines == 2)
		cr0 |= SPI_DAF_DUAL << SPI_DAF_OFFSET;
	dw_writel(dws, DW_SPI_CTRL0, cr0);
	dw_writel(dws, DW_SPI_CTRL1, ndf);
	/* Wait cycles, 8-bit instruction (encoding 2), address length in
	 * 4-bit nibbles (size in bits >> 2) */
	qspi_cr0 = ((qspi_message->dummy_cycles & 0xf) << SPI_WAIT_CYCLES_OFFSET) |
	(2 << SPI_INST_LENGTH_OFFSET) |
	((qspi_message->address.size >> 2) << SPI_ADDR_LENGTH_OFFSET);
	/* TRANS_TYPE: 1 = instr on 1 line / addr on N lines, 2 = both on N */
	if (qspi_message->instruction.qspi_lines == 1 && qspi_message->address.qspi_lines > 1)
		qspi_cr0 |= 1;
	else if (qspi_message->instruction.qspi_lines > 1 && qspi_message->address.qspi_lines > 1)
		qspi_cr0 |= 2;
	dw_writel(dws, DW_SPI_QSPI_CTRL0, qspi_cr0);
	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);
	/*
	 * Interrupt mode
	 * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, message);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			goto end;
		}
	} else if (!chip->poll_mode) {
		/* RX watermark: largest power of two <= len/n_bytes and <= fifo/2 */
		dw_writel(dws, DW_SPI_DMACR, 0);
		rxlevel = 1 << __ffs(dws->len / dws->n_bytes);
		while (rxlevel > dws->fifo_len / 2)
			rxlevel >>= 1;
		dws->rxlevel = rxlevel;
		dw_writel(dws, DW_SPI_RXFLTR, rxlevel - 1);
		/* Set the interrupt mask */
		imask |= SPI_INT_RXUI | SPI_INT_RXOI | SPI_INT_RXFI;
		spi_umask_intr(dws, imask);
		dws->transfer_handler = qspi_read_interrupt;
	}
	/* Drop any stale completion before starting this chunk */
	xQueueReset(dws->xfer_done);
	dw_writel(dws, DW_SPI_SER, BIT(dws->slave.cs));
	spi_enable_chip(dws, 1);
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_transfer(dws, message);
		if (ret < 0)
			goto end;
	}
	/* Byte-swap the flash address into wire order (MSB first in DR) */
	addr = qspi_message->address.content + xfer_len;
	if (qspi_message->address.size == 32) {
		addr = ((addr >> 24) & 0xff) | (((addr >> 16) & 0xff) << 8) |
		(((addr >> 8) & 0xff) << 16) | ((addr & 0xff) << 24);
	} else {
		addr = ((addr >> 16) & 0xff) | (((addr >> 8) & 0xff) << 8) | ((addr & 0xff) << 16);
	}
	/* Writing opcode + address triggers the read */
	dw_write_io_reg(dws, DW_SPI_DR, qspi_message->instruction.content);
	dw_write_io_reg(dws, DW_SPI_DR, addr);
	transfer_timeout = dw_spi_calculate_timeout(dws, message->length);
	if (xQueueReceive(dws->xfer_done, NULL, transfer_timeout) != pdTRUE) {
		int_error_stop(dws, "transfer timeout");
		ret = -ETIMEDOUT;
		if (dws->dma_mapped)
			dma_stop_channel(dws->dma_rx);
		goto end;
	}
	/* xfer_ret is set by dma_transfer() on a FIFO error */
	if (dws->xfer_ret) {
		dws->xfer_ret = 0;
		ret = -1;
		if (dws->dma_mapped) {
			if (dws->rx_dummy_buffer) {
				vPortFree(dws->rx_dummy_buffer);
				dws->rx_dummy_buffer = NULL;
			}
			dma_stop_channel(dws->dma_rx);
		}
		goto end;
	}
	if (dws->dma_mapped) {
		/* Invalidate the destination range so the CPU sees DMA data */
		portDISABLE_INTERRUPTS();
		if (dws->rx_dummy_buffer) {
			CP15_invalidate_dcache_for_dma((uint32_t)dws->rx_dummy_buffer,
			(uint32_t)dws->rx_dummy_buffer + dws->len);
		} else {
			CP15_invalidate_dcache_for_dma((uint32_t)dws->rx, (uint32_t)dws->rx + dws->len);
		}
		portENABLE_INTERRUPTS();
		/* Copy out of the bounce buffer used for unaligned rx */
		if (dws->rx_dummy_buffer) {
			memcpy(dws->rx, dws->rx_dummy_buffer, dws->len);
			vPortFree(dws->rx_dummy_buffer);
			dws->rx_dummy_buffer = NULL;
		}
		dma_stop_channel(dws->dma_rx);
	}
	/* More data than one NDF window: deassert CS briefly and continue */
	xfer_len += dws->len;
	if (xfer_len < message->length) {
		if (message->cs_release)
			dw_spi_chipselect(dws, 0);
		udelay(1);
		goto xfer_continue;
	}
	end:
	if (message->cs_release)
		dw_spi_chipselect(dws, 0);
	return ret;
}
  643. /* This may be called twice for each spi dev */
  644. int dw_spi_setup(struct spi_slave *slave, struct spi_configuration *configuration)
  645. {
  646. struct dw_spi *dws = (struct dw_spi *)slave;
  647. struct chip_data *chip;
  648. /* Only alloc on first setup */
  649. chip = dws->chip;
  650. if (!chip) {
  651. chip = pvPortMalloc(sizeof(struct chip_data));
  652. if (!chip)
  653. return -ENOMEM;
  654. memset(chip, 0, sizeof(struct chip_data));
  655. dws->chip = chip;
  656. }
  657. dws->slave.mode = configuration->mode;
  658. chip->clk_div = (DIV_ROUND_UP(dws->max_freq, configuration->max_hz) + 1) & 0xfffe;
  659. chip->qspi_clk_div = (DIV_ROUND_UP(dws->max_freq, configuration->qspi_max_hz) + 1) & 0xfffe;
  660. dws->current_freq = dws->max_freq / chip->clk_div;
  661. dws->current_qspi_freq = dws->max_freq / chip->qspi_clk_div;
  662. printf("spi max_freq %u, current freq %u, qspi_freq %u.\n", dws->max_freq,
  663. dws->current_freq, dws->current_qspi_freq);
  664. gpio_direction_output(dws->cs_gpio,
  665. !(dws->slave.mode & SPI_CS_HIGH));
  666. return 0;
  667. }
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
	spi_reset_chip(dws);
	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
	if (!dws->fifo_len) {
		u32 fifo;
		/* TXFLTR reads back the written value only while it is a
		 * valid threshold; the first mismatch reveals the depth. */
		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFLTR, 0);
		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		/* NOTE(review): `dev` is not defined in this file — presumably
		 * dev_dbg is a macro that ignores it; confirm against board.h. */
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}
}
/*
 * One-time controller bring-up: register the IRQ handler, probe the
 * FIFO depth, and initialize the optional DMA backend. Returns 0 on
 * success or the negative error from request_irq.
 */
static int dw_spi_add_host(struct dw_spi *dws)
{
	int ret;
	BUG_ON(dws == NULL);
	dws->type = SSI_MOTO_SPI;
	dws->dma_inited = 0;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
	ret = request_irq(dws->irq, 0, dw_spi_irq, dws);
	if (ret < 0) {
		/* NOTE(review): `dev` is not defined in this file — presumably
		 * dev_err is a macro that ignores it; confirm. */
		dev_err(dev, "can not get IRQ\n");
		goto err_exit;
	}
	/* Basic HW init */
	spi_hw_init(dws);
	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
			dws->dma_inited = 0;
			/* NOTE(review): setting dma_mapped = 1 on DMA-init
			 * *failure* looks inverted, but both transfer entry
			 * points overwrite dma_mapped before use, so this
			 * assignment has no visible effect — confirm intent. */
			dws->dma_mapped = 1;
		}
	}
	return 0;
	err_exit:
	return ret;
}
/* static void dw_spi_remove_host(struct dw_spi *dws)
{
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
spi_shutdown_chip(dws);
free_irq(dws->irq);
} */
  721. void dwspi_jedec252_reset(void)
  722. {
  723. int i;
  724. int si = 0;
  725. gpio_direction_output(SPI0_CS0_GPIO, 1);
  726. gpio_direction_output(SPI0_IO0_GPIO, 1);
  727. udelay(300);
  728. for (i = 0; i < 4; i++) {
  729. gpio_direction_output(SPI0_CS0_GPIO, 0);
  730. gpio_direction_output(SPI0_IO0_GPIO, si);
  731. si = !si;
  732. udelay(300);
  733. gpio_direction_output(SPI0_CS0_GPIO, 1);
  734. udelay(300);
  735. }
  736. }
  737. static void dw_spi_dma_complete_callback(void *param, unsigned int mask)
  738. {
  739. struct dw_spi *dws = param;
  740. xQueueSendFromISR(dws->xfer_done, NULL, 0);
  741. }
  742. static int dw_spi_dma_init(struct dw_spi *dws)
  743. {
  744. dws->dma_rx = dma_request_channel(SPI0_RX_DMA_CH);
  745. if (!dws->dma_rx) {
  746. printf("dwspi request dma channel fail.\n");
  747. return -1;
  748. }
  749. return 0;
  750. }
  751. static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_message *message)
  752. {
  753. dws->rxlevel = dws->fifo_len / 4;
  754. dw_writel(dws, DW_SPI_DMARDLR, dws->rxlevel - 1);
  755. dw_writel(dws, DW_SPI_DMACR, SPI_DMA_RDMAE);
  756. /* Set the interrupt mask */
  757. spi_umask_intr(dws, SPI_INT_RXUI | SPI_INT_RXOI);
  758. dws->transfer_handler = dma_transfer;
  759. return 0;
  760. }
  761. static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_message *message)
  762. {
  763. struct dma_config cfg = {0};
  764. int ret;
  765. /* Set external dma config: burst size, burst width */
  766. cfg.dst_addr_width = dws->dma_width;
  767. cfg.src_addr_width = dws->dma_width;
  768. /* Match burst msize with external dma config */
  769. cfg.dst_maxburst = dws->rxlevel;
  770. cfg.src_maxburst = dws->rxlevel;
  771. cfg.transfer_size = dws->len;
  772. cfg.direction = DMA_DEV_TO_MEM;
  773. cfg.src_addr = REGS_SPI0_BASE + DW_SPI_DR;
  774. //if (((u32)dws->rx/* | dws->len*/) & (ARCH_DMA_MINALIGN - 1)) {
  775. if ((u32)dws->rx & 31) {
  776. dws->rx_dummy_buffer = pvPortMalloc(dws->len);
  777. if (!dws->rx_dummy_buffer)
  778. return -ENOMEM;
  779. cfg.dst_addr = (u32)dws->rx_dummy_buffer;
  780. } else {
  781. cfg.dst_addr = (u32)dws->rx;
  782. }
  783. /* Invalidate cache before read */
  784. CP15_flush_dcache_for_dma(cfg.dst_addr,
  785. cfg.dst_addr + dws->len);
  786. cfg.src_id = SPI0_RX;
  787. ret = dma_config_channel(dws->dma_rx, &cfg);
  788. if (ret) {
  789. printf("dwspi failed to config dma.\n");
  790. return -EBUSY;
  791. }
  792. /* Set dw_spi_dma_complete_callback as callback */
  793. dma_register_complete_callback(dws->dma_rx, dw_spi_dma_complete_callback, dws);
  794. dma_start_channel(dws->dma_rx);
  795. return 0;
  796. }
/* RX-only DMA backend; dma_exit/can_dma/dma_stop are intentionally
 * left NULL and must not be called through this table. */
static const struct dw_spi_dma_ops dw_dma_ops = {
	.dma_init = dw_spi_dma_init,
	.dma_setup = dw_spi_dma_setup,
	.dma_transfer = dw_spi_dma_transfer,
};
  802. int dwspi_init(void)
  803. {
  804. struct dw_spi *dws;
  805. int ret;
  806. dwspi_jedec252_reset();
  807. pinctrl_set_group(PGRP_SPI0);
  808. dws = pvPortMalloc(sizeof(struct dw_spi));
  809. if (!dws)
  810. return -ENOMEM;
  811. memset(dws, 0, sizeof(struct dw_spi));
  812. dws->xfer_done = xQueueCreate(1, 0);
  813. dws->regs = (void __iomem *)REGS_SPI0_BASE;
  814. dws->irq = SPI0_IRQn;
  815. dws->bus_num = 0;
  816. dws->max_freq = ulClkGetRate(CLK_SPI0);
  817. vClkEnable(CLK_SPI0);
  818. dws->num_cs = 1;
  819. dws->cs_gpio = SPI0_CS0_GPIO;
  820. dws->slave.mode = SPI_MODE_0;
  821. dws->slave.cs = 0;
  822. dws->slave.xfer = dw_spi_transfer_one;
  823. dws->slave.qspi_read = dw_qspi_read;
  824. dws->slave.configure = dw_spi_setup;
  825. dws->dma_ops = &dw_dma_ops;
  826. ret = dw_spi_add_host(dws);
  827. if (ret)
  828. goto out;
  829. strncpy(dws->slave.name, "spi0", 16);
  830. spi_add_slave(&dws->slave);
  831. return 0;
  832. out:
  833. return ret;
  834. }