/* dwspi.c - Synopsys DesignWare (dw_apb_ssi) SPI controller driver for FreeRTOS */
  1. #include "FreeRTOS.h"
  2. #include "chip.h"
  3. #include "board.h"
  4. #include "pinctrl.h"
  5. #include "os_adapt.h"
  6. #include <string.h>
/* Register offsets (DW_apb_ssi memory map) */
#define DW_SPI_CTRL0 0x00
#define DW_SPI_CTRL1 0x04
#define DW_SPI_SSIENR 0x08
#define DW_SPI_MWCR 0x0c
#define DW_SPI_SER 0x10
#define DW_SPI_BAUDR 0x14
#define DW_SPI_TXFLTR 0x18
#define DW_SPI_RXFLTR 0x1c
#define DW_SPI_TXFLR 0x20
#define DW_SPI_RXFLR 0x24
#define DW_SPI_SR 0x28
#define DW_SPI_IMR 0x2c
#define DW_SPI_ISR 0x30
#define DW_SPI_RISR 0x34
#define DW_SPI_TXOICR 0x38
#define DW_SPI_RXOICR 0x3c
#define DW_SPI_RXUICR 0x40
#define DW_SPI_MSTICR 0x44
#define DW_SPI_ICR 0x48
#define DW_SPI_DMACR 0x4c
#define DW_SPI_DMATDLR 0x50
#define DW_SPI_DMARDLR 0x54
#define DW_SPI_IDR 0x58
#define DW_SPI_VERSION 0x5c
#define DW_SPI_DR 0x60
#define DW_SPI_RX_SAMPLE_DLY 0xf0
#define DW_SPI_QSPI_CTRL0 0xf4
/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 0
#define SPI_FRF_OFFSET 6
#define SPI_FRF_SPI 0x0
#define SPI_FRF_SSP 0x1
#define SPI_FRF_MICROWIRE 0x2
#define SPI_FRF_RESV 0x3
#define SPI_MODE_OFFSET 8
#define SPI_SCPH_OFFSET 8
#define SPI_SCOL_OFFSET 9
#define SPI_TMOD_OFFSET 10
#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
#define SPI_SLVOE_OFFSET 12
#define SPI_SRL_OFFSET 13
#define SPI_CFS_OFFSET 16
#define SPI_DAF_OFFSET 22 /* data-frame lines: standard/dual/quad */
#define SPI_DAF_STANDARD 0
#define SPI_DAF_DUAL 1
#define SPI_DAF_QUAD 2
/* Bit fields in QSPI_CTRLR0 */
#define SPI_TRANS_TYPE_OFFSET 0
#define SPI_ADDR_LENGTH_OFFSET 2
#define SPI_INST_LENGTH_OFFSET 8
#define SPI_WAIT_CYCLES_OFFSET 11
/* Bit fields in SR, 7 bits */
#define SR_MASK 0x7f /* cover 7 bits */
#define SR_BUSY (1 << 0)
#define SR_TF_NOT_FULL (1 << 1)
#define SR_TF_EMPT (1 << 2)
#define SR_RF_NOT_EMPT (1 << 3)
#define SR_RF_FULL (1 << 4)
#define SR_TX_ERR (1 << 5)
#define SR_DCOL (1 << 6)
/* Bit fields in ISR, IMR, RISR, 7 bits */
#define SPI_INT_TXEI (1 << 0)
#define SPI_INT_TXOI (1 << 1)
#define SPI_INT_RXUI (1 << 2)
#define SPI_INT_RXOI (1 << 3)
#define SPI_INT_RXFI (1 << 4)
#define SPI_INT_MSTI (1 << 5)
/* Bit fields in DMACR */
#define SPI_DMA_RDMAE (1 << 0)
#define SPI_DMA_TDMAE (1 << 1)
/* TX RX interrupt level threshold, max can be 256 */
#define SPI_INT_THRESHOLD 32
/* Static, per-instance board description (one per controller). */
typedef struct {
uint32_t base;		/* register block base address */
int irqn;		/* interrupt number */
int software_n;		/* soft-reset line id */
int cs_gpio;		/* chip-select GPIO, -1 if not available */
int io0_gpio;		/* IO0 GPIO used for the JEDEC reset toggle, -1 if none */
int gpio_mux_grpid;	/* pinmux group for this controller's pins */
int clkid;		/* functional clock id */
int mode;		/* default SPI mode (CPOL/CPHA) */
const char *driver_name;
int dma_tx_id;		/* DMA handshake ids */
int dma_rx_id;
}dw_spi_obj_t;
/* Frame formats selectable in CTRLR0.FRF. */
enum dw_ssi_type {
SSI_MOTO_SPI = 0,
SSI_TI_SSP,
SSI_NS_MICROWIRE,
};
struct dw_spi;
/* Hooks for an optional DMA backend; members may be NULL (only
 * dma_init/dma_setup/dma_transfer are populated in this file). */
struct dw_spi_dma_ops {
int (*dma_init)(struct dw_spi *dws);
void (*dma_exit)(struct dw_spi *dws);
int (*dma_setup)(struct dw_spi *dws, struct spi_message *message);
bool (*can_dma)(struct dw_spi *dws, struct spi_message *message);
int (*dma_transfer)(struct dw_spi *dws, struct spi_message *message);
void (*dma_stop)(struct dw_spi *dws);
};
/* Slave spi_dev related: per-device settings cached from configuration */
struct chip_data {
u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
u8 enable_dma;
u16 clk_div; /* baud rate divider (even, see dw_spi_setup) */
u16 qspi_clk_div; /* divider used for QSPI reads */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
};
/* Per-controller runtime state. */
struct dw_spi {
struct spi_slave slave;	/* must stay first: callers cast spi_slave* to dw_spi* */
QueueHandle_t xfer_done;	/* zero-item-size queue used as a completion signal */
enum dw_ssi_type type;
void __iomem *regs;	/* register base */
struct clk *clk;
int irq;
u32 fifo_len; /* depth of the FIFO buffer (auto-detected in spi_hw_init) */
u32 max_freq; /* max bus freq supported */
u16 bus_num;
u16 num_cs; /* supported slave numbers */
u32 cs_gpio; /* GPIO driven as chip-select */
/* Current message transfer state info */
size_t len;	/* length of the current chunk, in bytes */
void *tx;
void *tx_end;
void *rx;
void *rx_end;
u32 rxlevel;	/* RX FIFO watermark / DMA burst size, in frames */
int dma_mapped;	/* 1 when the current transfer uses the DMA path */
char *rx_dummy_buffer;	/* bounce buffer for cache-unaligned DMA RX */
u8 n_bytes; /* bytes per frame for the current transfer (1/2/4) */
u32 dma_width;
void (*transfer_handler)(struct dw_spi *dws);	/* ISR dispatch target */
u32 current_freq; /* frequency in hz */
u32 current_qspi_freq;
int xfer_ret;	/* set from the ISR when a DMA transfer errored */
/* DMA info */
int dma_inited;
struct dma_chan *txchan;
struct dma_chan *rxchan;
unsigned long dma_chan_busy;
dma_addr_t dma_addr; /* phy address of the Data register */
const struct dw_spi_dma_ops *dma_ops;
int dma_tx_id;	/* DMA handshake ids */
int dma_rx_id;
/* Bus interface info */
void *priv;
struct chip_data *chip;	/* allocated lazily in dw_spi_setup */
};
/*
 * Each SPI slave device to work with dw_api controller should
 * has such a structure claiming its working mode (poll or PIO/DMA),
 * which can be save in the "controller_data" member of the
 * struct spi_device.
 */
struct dw_spi_chip {
u8 poll_mode; /* 1 for controller polling mode */
u8 type; /* SPI/SSP/MicroWire */
void (*cs_control)(u32 command);
};
/* Board description for the SPI0 instance (no IO0 GPIO: JEDEC reset skipped). */
static const dw_spi_obj_t dw_spi0_obj = {
.base = REGS_SPI0_BASE,
.irqn = SPI0_IRQn,
.software_n = softreset_ssp0,
.cs_gpio = 127,
.io0_gpio = -1,
.gpio_mux_grpid = PGRP_SPI0,
.clkid = CLK_SPI0,
.mode = SPI_MODE_0,
.driver_name = "dw_spi0",
.dma_tx_id = SPI0_TX,
.dma_rx_id = SPI0_RX,
};
/* Board description for the SPI2 instance. */
static const dw_spi_obj_t dw_spi2_obj = {
.base = REGS_SPI2_BASE,
.irqn = SPI2_IRQn,
.software_n = softreset_ssp2,
.cs_gpio = 80,
.io0_gpio = 82,
.gpio_mux_grpid = PGRP_SPI2,
.clkid = CLK_SPI2,
.mode = SPI_MODE_0,
.driver_name = "dw_spi2",
.dma_tx_id = SPI2_TX,
.dma_rx_id = SPI2_RX,
};
  200. static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
  201. {
  202. return readl((u32)dws->regs + offset);
  203. }
  204. static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
  205. {
  206. writel(val, (u32)dws->regs + offset);
  207. }
/* Data-register read; kept separate from dw_readl as a seam for
 * width-specific access if ever needed. */
static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
{
return dw_readl(dws, offset);
}
/* Data-register write; counterpart of dw_read_io_reg. */
static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
{
dw_writel(dws, offset, val);
}
  216. static inline void spi_enable_chip(struct dw_spi *dws, int enable)
  217. {
  218. dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
  219. }
/* Program the SCLK divider (BAUDR); dividers are forced even in dw_spi_setup. */
static inline void spi_set_clk(struct dw_spi *dws, u16 div)
{
dw_writel(dws, DW_SPI_BAUDR, div);
}
  224. /* Disable IRQ bits */
  225. static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
  226. {
  227. u32 new_mask;
  228. new_mask = dw_readl(dws, DW_SPI_IMR) & ~mask;
  229. dw_writel(dws, DW_SPI_IMR, new_mask);
  230. }
  231. /* Enable IRQ bits */
  232. static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
  233. {
  234. u32 new_mask;
  235. new_mask = dw_readl(dws, DW_SPI_IMR) | mask;
  236. dw_writel(dws, DW_SPI_IMR, new_mask);
  237. }
/*
 * This does disable the SPI controller, interrupts, and re-enable the
 * controller back. Transmit and receive FIFO buffers are cleared when the
 * device is disabled.
 */
static inline void spi_reset_chip(struct dw_spi *dws)
{
spi_enable_chip(dws, 0);
spi_mask_intr(dws, 0xff);	/* mask every interrupt source */
spi_enable_chip(dws, 1);
}
  249. /* static inline void spi_shutdown_chip(struct dw_spi *dws)
  250. {
  251. spi_enable_chip(dws, 0);
  252. spi_set_clk(dws, 0);
  253. } */
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
u32 tx_left, tx_room, rxtx_gap;
/* Frames still to send vs. free slots in the TX FIFO. */
tx_left = ((u32)dws->tx_end - (u32)dws->tx) / dws->n_bytes;
tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
/*
 * Another concern is about the tx/rx mismatch, we
 * though to use (dws->fifo_len - rxflr - txflr) as
 * one maximum value for tx, but it doesn't cover the
 * data which is out of tx/rx fifo and inside the
 * shift registers. So a control from sw point of
 * view is taken.
 */
rxtx_gap = (((u32)dws->rx_end - (u32)dws->rx) - ((u32)dws->tx_end - (u32)dws->tx))
/ dws->n_bytes;
/* Also cap by how far TX may run ahead of RX without overflowing RX. */
return configMIN(tx_left, configMIN(tx_room, (u32) (dws->fifo_len - rxtx_gap)));
}
/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
/* Frames still expected, capped by what is actually queued in the RX FIFO. */
u32 rx_left = ((u32)dws->rx_end - (u32)dws->rx) / dws->n_bytes;
return configMIN(rx_left, dw_readl(dws, DW_SPI_RXFLR));
}
/* Fill the TX FIFO with as many frames as currently allowed by tx_max().
 * When the caller supplied no TX buffer, zeros are clocked out instead. */
static void dw_writer(struct dw_spi *dws)
{
u32 max = tx_max(dws);
u32 txw = 0;
while (max--) {
/* Set the tx word if the transfer's original "tx" is not null */
/* (tx_end - len recovers the initial tx pointer, since tx_end was
 * set to tx + len in dw_spi_transfer_one) */
if ((u32)dws->tx_end - dws->len) {
if (dws->n_bytes == 1)
txw = *(u8 *)(dws->tx);
else if (dws->n_bytes == 2)
txw = *(u16 *)(dws->tx);
else
txw = *(u32 *)(dws->tx);
}
dw_write_io_reg(dws, DW_SPI_DR, txw);
/* Advance even for a NULL tx so the frame accounting stays correct. */
dws->tx = (u8*)dws->tx + dws->n_bytes;
}
}
/* Drain up to rx_max() frames from the RX FIFO. Frames are always read
 * (to empty the FIFO) but only stored when an RX buffer was supplied. */
static void dw_reader(struct dw_spi *dws)
{
u32 max = rx_max(dws);
u32 rxw;
while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
/* Care rx only if the transfer's original "rx" is not null */
/* (rx_end - len recovers the initial rx pointer) */
if ((u32)dws->rx_end - dws->len) {
if (dws->n_bytes == 1)
*(u8 *)(dws->rx) = rxw;
else if (dws->n_bytes == 2)
*(u16 *)(dws->rx) = rxw;
else
*(u32 *)(dws->rx) = rxw;
}
dws->rx = (u8*)dws->rx + dws->n_bytes;
}
}
  314. static void int_error_stop(struct dw_spi *dws, const char *msg)
  315. {
  316. spi_reset_chip(dws);
  317. dev_err(&dws->master->dev, "%s\n", msg);
  318. }
/* IRQ handler for interrupt-driven PIO transfers (runs in ISR context). */
static void interrupt_transfer(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
/* Error handling */
if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
dw_readl(dws, DW_SPI_ICR);	/* reading ICR clears all interrupts */
int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
xQueueSendFromISR(dws->xfer_done, NULL, 0);	/* wake the waiting task */
return;
}
dw_reader(dws);
/* All expected RX frames consumed: transfer complete. */
if (dws->rx_end == dws->rx) {
spi_mask_intr(dws, SPI_INT_TXEI);
xQueueSendFromISR(dws->xfer_done, NULL, 0);
return;
}
if (irq_status & SPI_INT_TXEI) {
/* Mask TXEI while refilling to avoid re-entry mid-fill. */
spi_mask_intr(dws, SPI_INT_TXEI);
dw_writer(dws);
/* Enable TX irq always, it will be disabled when RX finished */
spi_umask_intr(dws, SPI_INT_TXEI);
}
return;
}
/* IRQ handler for interrupt-driven QSPI reads: drain the RX FIFO on each
 * RX-full interrupt, signal completion when the whole buffer is filled. */
static void qspi_read_interrupt(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
/* Error handling */
if (irq_status & (SPI_INT_RXOI | SPI_INT_RXUI)) {
dw_readl(dws, DW_SPI_ICR);	/* reading ICR clears all interrupts */
int_error_stop(dws, "qspi_read_interrupt: fifo overrun/underrun");
xQueueSendFromISR(dws->xfer_done, NULL, 0);
return;
}
if (irq_status & SPI_INT_RXFI) {
/* RX FIFO reached the watermark: drain what is available. */
dw_reader(dws);
}
if (dws->rx_end == dws->rx) {
xQueueSendFromISR(dws->xfer_done, NULL, 0);
return;
}
return;
}
/*
 * IRQ handler installed while a DMA transfer is in flight. Only the error
 * interrupts (RXU/RXO) are unmasked in this mode (see dw_spi_dma_setup),
 * so any pending status here means the transfer failed.
 */
static void dma_transfer(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
/* NOTE(review): printf from interrupt context — confirm this is acceptable
 * on this platform or remove. */
printf("status=0x%x.\n", irq_status);
if (!irq_status)
return;
dw_readl(dws, DW_SPI_ICR);	/* reading ICR clears all interrupts */
int_error_stop(dws, "dma_transfer: fifo overrun/underrun");
dws->xfer_ret = 1;	/* flag the error to the waiting task */
xQueueSendFromISR(dws->xfer_done, NULL, 0);
return;
}
  386. static void dw_spi_irq(void *param)
  387. {
  388. struct dw_spi *dws = param;
  389. u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;
  390. if (!irq_status)
  391. return;
  392. dws->transfer_handler(dws);
  393. }
  394. /* Must be called inside pump_transfers() */
  395. static int poll_transfer(struct dw_spi *dws)
  396. {
  397. do {
  398. dw_writer(dws);
  399. dw_reader(dws);
  400. taskYIELD();
  401. } while (dws->rx_end > dws->rx);
  402. return 0;
  403. }
  404. static void dw_spi_chipselect(struct dw_spi *dws, int is_active)
  405. {
  406. int dev_is_lowactive = !(dws->slave.mode & SPI_CS_HIGH);
  407. if (dws->slave.mode & SPI_NO_CS)
  408. return;
  409. gpio_direction_output(dws->cs_gpio, is_active ^ dev_is_lowactive);
  410. }
  411. static int dw_spi_calculate_timeout(struct dw_spi *dws, int size)
  412. {
  413. unsigned long timeout = 0;
  414. /* Time with actual data transfer and CS change delay related to HW */
  415. timeout = (8 + 4) * size / dws->current_freq;
  416. /* Add extra second for scheduler related activities */
  417. timeout += 1;
  418. /* Double calculated timeout */
  419. return pdMS_TO_TICKS(2 * timeout * MSEC_PER_SEC);
  420. }
/*
 * Execute one full-duplex SPI message, either by polling or by TXEI-driven
 * interrupts. Returns 0 on success, -EINVAL for an impossible frame size,
 * -ETIMEDOUT when the completion signal never arrives.
 */
static int dw_spi_transfer_one(struct spi_slave *slave, struct spi_message *message)
{
struct dw_spi *dws = (struct dw_spi *)slave;
struct chip_data *chip = dws->chip;
u8 imask = 0;
u16 txlevel = 0;
u32 cr0;
u32 bits_per_word = 0;
unsigned long transfer_timeout;
int ret = 0;
chip->tmode = SPI_TMOD_TR;	/* full duplex: transmit and receive */
/* NOTE(review): dma_mapped is forced to 0 here, so the dma branches
 * below are currently unreachable for plain SPI transfers. */
dws->dma_mapped = 0;
dws->tx = (void *)message->send_buf;
dws->tx_end = (u8*)dws->tx + message->length;
dws->rx = message->recv_buf;
dws->rx_end = (u8*)dws->rx + message->length;
dws->len = message->length;
spi_enable_chip(dws, 0);	/* controller must be idle while reprogramming */
spi_set_clk(dws, chip->clk_div);
/* Delay the RX sampling point at high clock rates. */
if (dws->current_freq >= 63500000)
dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, 2);
else if (dws->current_freq >= 50000000)
dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, 1);
if (message->cs_take)
dw_spi_chipselect(dws, 1);
/* Use the widest frame size that both the length and the buffer
 * alignment permit (FIFO accesses are whole frames). */
if (message->length & 1 || (u32)dws->tx & 1 || (u32)dws->rx & 1)
bits_per_word = 8;
else if (message->length & 3 || (u32)dws->tx & 3 || (u32)dws->rx & 3)
bits_per_word = 16;
else
bits_per_word = 32;
if (bits_per_word == 8) {
dws->n_bytes = 1;
dws->dma_width = DMA_BUSWIDTH_1_BYTE;
} else if (bits_per_word == 16) {
dws->n_bytes = 2;
dws->dma_width = DMA_BUSWIDTH_2_BYTES;
} else if (bits_per_word == 32) {
dws->n_bytes = 4;
dws->dma_width = DMA_BUSWIDTH_4_BYTES;
} else {
ret = -EINVAL;
goto end;
}
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = ((bits_per_word - 1) << SPI_DFS_OFFSET)
| (chip->type << SPI_FRF_OFFSET)
| ((dws->slave.mode & 3) << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
/*
 * Adjust transfer mode if necessary. Requires platform dependent
 * chipselect mechanism.
 */
if (chip->cs_control) {
if (dws->rx && dws->tx)
chip->tmode = SPI_TMOD_TR;
else if (dws->rx)
chip->tmode = SPI_TMOD_RO;
else
chip->tmode = SPI_TMOD_TO;
cr0 &= ~SPI_TMOD_MASK;
cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
}
dw_writel(dws, DW_SPI_CTRL0, cr0);
/* For poll mode just disable all interrupts */
spi_mask_intr(dws, 0xff);
/*
 * Interrupt mode
 * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
 */
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_setup(dws, message);
if (ret < 0) {
spi_enable_chip(dws, 1);
goto end;
}
} else if (!chip->poll_mode) {
dw_writel(dws, DW_SPI_DMACR, 0);
/* TX watermark: half the FIFO, or the whole (short) transfer. */
txlevel = configMIN(dws->fifo_len / 2, dws->len / dws->n_bytes);
dw_writel(dws, DW_SPI_TXFLTR, txlevel);
/* Set the interrupt mask */
imask |= SPI_INT_TXEI | SPI_INT_TXOI |
SPI_INT_RXUI | SPI_INT_RXOI;
spi_umask_intr(dws, imask);
dws->transfer_handler = interrupt_transfer;
}
/* Drop any stale completion signal before starting. */
xQueueReset(dws->xfer_done);
dw_writel(dws, DW_SPI_SER, BIT(dws->slave.cs));
spi_enable_chip(dws, 1);	/* this kicks off the transfer */
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_transfer(dws, message);
if (ret < 0)
goto end;
}
if (chip->poll_mode) {
ret = poll_transfer(dws);
goto end;
}
/* Interrupt mode: block until the ISR signals completion or timeout. */
transfer_timeout = dw_spi_calculate_timeout(dws, message->length);
if (xQueueReceive(dws->xfer_done, NULL, transfer_timeout) != pdTRUE) {
int_error_stop(dws, "transfer timeout");
ret = -ETIMEDOUT;
goto end;
}
end:
if (message->cs_release)
dw_spi_chipselect(dws, 0);
return ret;
}
/*
 * QSPI (EEPROM-style) read: program instruction/address/dummy-cycle layout,
 * push the opcode and address into the FIFO, and receive up to NDF+1 data
 * frames on 1/2/4 data lines via DMA (or RXFI interrupts as fallback).
 * Reads larger than the 16-bit NDF limit are split into chunks.
 */
static int dw_qspi_read(struct spi_slave *slave, struct qspi_message *qspi_message)
{
struct dw_spi *dws = (struct dw_spi *)slave;
struct chip_data *chip = dws->chip;
struct spi_message *message = (struct spi_message *)&qspi_message->message;
u8 imask = 0;
u16 rxlevel = 0;
u32 cr0, qspi_cr0, ndf;
u32 bits_per_word = 0;
u32 addr;
unsigned long transfer_timeout;
u32 xfer_len = 0;	/* bytes completed so far across chunks */
int ret = 0;
chip->tmode = SPI_TMOD_RO;	/* receive-only transfer mode */
/* Widest frame size that divides the total length evenly. */
if (message->length & 1)
bits_per_word = 8;
else if (message->length & 3)
bits_per_word = 16;
else
bits_per_word = 32;
if (bits_per_word == 8) {
dws->n_bytes = 1;
dws->dma_width = DMA_BUSWIDTH_1_BYTE;
} else if (bits_per_word == 16) {
dws->n_bytes = 2;
dws->dma_width = DMA_BUSWIDTH_2_BYTES;
} else if (bits_per_word == 32) {
dws->n_bytes = 4;
dws->dma_width = DMA_BUSWIDTH_4_BYTES;
} else {
ret = -EINVAL;
goto end;
}
xfer_continue:
/* CTRLR1.NDF is "frames - 1" and limited to 16 bits by the hardware. */
ndf = (message->length - xfer_len) / dws->n_bytes - 1;
if (ndf > 0xffff) ndf = 0xffff;
dws->dma_mapped = 1;	/* QSPI reads always take the DMA path */
dws->xfer_ret = 0;
dws->rx = (u8*)message->recv_buf + xfer_len;
dws->len = (ndf + 1) * dws->n_bytes;	/* bytes in this chunk */
dws->rx_end = (u8*)dws->rx + dws->len;
spi_enable_chip(dws, 0);	/* controller must be idle while reprogramming */
spi_set_clk(dws, chip->qspi_clk_div);
/* Delay the RX sampling point at high clock rates.
 * NOTE(review): this checks current_freq, not current_qspi_freq —
 * confirm which clock is in effect here. */
if (dws->current_freq >= 63500000)
dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, 2);
else if (dws->current_freq >= 50000000)
dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, 1);
if (message->cs_take)
dw_spi_chipselect(dws, 1);
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = ((bits_per_word - 1) << SPI_DFS_OFFSET)
| (chip->type << SPI_FRF_OFFSET)
| ((dws->slave.mode & 3) << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
/* Select dual/quad data lines for the data phase. */
if (qspi_message->qspi_data_lines == 4)
cr0 |= SPI_DAF_QUAD << SPI_DAF_OFFSET;
else if (qspi_message->qspi_data_lines == 2)
cr0 |= SPI_DAF_DUAL << SPI_DAF_OFFSET;
dw_writel(dws, DW_SPI_CTRL0, cr0);
dw_writel(dws, DW_SPI_CTRL1, ndf);
/* 8-bit (len code 2) instruction, address length in nibbles,
 * plus the dummy wait cycles between address and data. */
qspi_cr0 = ((qspi_message->dummy_cycles & 0xf) << SPI_WAIT_CYCLES_OFFSET) |
(2 << SPI_INST_LENGTH_OFFSET) |
((qspi_message->address.size >> 2) << SPI_ADDR_LENGTH_OFFSET);
/* TRANS_TYPE: 1 = address on multi lines, 2 = instruction and address. */
if (qspi_message->instruction.qspi_lines == 1 && qspi_message->address.qspi_lines > 1)
qspi_cr0 |= 1;
else if (qspi_message->instruction.qspi_lines > 1 && qspi_message->address.qspi_lines > 1)
qspi_cr0 |= 2;
dw_writel(dws, DW_SPI_QSPI_CTRL0, qspi_cr0);
/* For poll mode just disable all interrupts */
spi_mask_intr(dws, 0xff);
/*
 * Interrupt mode
 * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
 */
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_setup(dws, message);
if (ret < 0) {
spi_enable_chip(dws, 1);
goto end;
}
} else if (!chip->poll_mode) {
dw_writel(dws, DW_SPI_DMACR, 0);
/* Largest power-of-two watermark that divides the chunk and
 * fits in half the FIFO. */
rxlevel = 1 << __ffs(dws->len / dws->n_bytes);
while (rxlevel > dws->fifo_len / 2)
rxlevel >>= 1;
dws->rxlevel = rxlevel;
dw_writel(dws, DW_SPI_RXFLTR, rxlevel - 1);
/* Set the interrupt mask */
imask |= SPI_INT_RXUI | SPI_INT_RXOI | SPI_INT_RXFI;
spi_umask_intr(dws, imask);
dws->transfer_handler = qspi_read_interrupt;
}
/* Drop any stale completion signal before starting. */
xQueueReset(dws->xfer_done);
dw_writel(dws, DW_SPI_SER, BIT(dws->slave.cs));
spi_enable_chip(dws, 1);
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_transfer(dws, message);
if (ret < 0)
goto end;
}
/* The DR expects the address most-significant byte first: byte-swap
 * into 24- or 32-bit big-endian order. */
addr = qspi_message->address.content + xfer_len;
if (qspi_message->address.size == 32) {
addr = ((addr >> 24) & 0xff) | (((addr >> 16) & 0xff) << 8) |
(((addr >> 8) & 0xff) << 16) | ((addr & 0xff) << 24);
} else {
addr = ((addr >> 16) & 0xff) | (((addr >> 8) & 0xff) << 8) | ((addr & 0xff) << 16);
}
/* Pushing opcode + address starts the read. */
dw_write_io_reg(dws, DW_SPI_DR, qspi_message->instruction.content);
dw_write_io_reg(dws, DW_SPI_DR, addr);
transfer_timeout = dw_spi_calculate_timeout(dws, message->length);
if (xQueueReceive(dws->xfer_done, NULL, transfer_timeout) != pdTRUE) {
int_error_stop(dws, "transfer timeout");
ret = -ETIMEDOUT;
if (dws->dma_mapped)
dma_stop_channel(dws->rxchan);
goto end;
}
/* ISR-flagged DMA error: clean up the bounce buffer and channel. */
if (dws->xfer_ret) {
dws->xfer_ret = 0;
ret = -1;
if (dws->dma_mapped) {
if (dws->rx_dummy_buffer) {
vPortFree(dws->rx_dummy_buffer);
dws->rx_dummy_buffer = NULL;
}
dma_stop_channel(dws->rxchan);
}
goto end;
}
if (dws->dma_mapped) {
/* Invalidate cache after dma read, rx and len must align to cacheline(32bytes) */
portDISABLE_INTERRUPTS();
if (dws->rx_dummy_buffer)
CP15_invalidate_dcache_for_dma((uint32_t)dws->rx_dummy_buffer,
(uint32_t)dws->rx_dummy_buffer + dws->len);
else
CP15_invalidate_dcache_for_dma((uint32_t)dws->rx, (uint32_t)dws->rx + dws->len);
portENABLE_INTERRUPTS();
/* Copy out of the bounce buffer when rx was unaligned. */
if (dws->rx_dummy_buffer) {
memcpy(dws->rx, dws->rx_dummy_buffer, dws->len);
vPortFree(dws->rx_dummy_buffer);
dws->rx_dummy_buffer = NULL;
}
dma_stop_channel(dws->rxchan);
}
/* More data than one NDF window: toggle CS and continue. */
xfer_len += dws->len;
if (xfer_len < message->length) {
if (message->cs_release)
dw_spi_chipselect(dws, 0);
udelay(1);
goto xfer_continue;
}
end:
if (message->cs_release)
dw_spi_chipselect(dws, 0);
return ret;
}
  688. /* This may be called twice for each spi dev */
  689. int dw_spi_setup(struct spi_slave *slave, struct spi_configuration *configuration)
  690. {
  691. struct dw_spi *dws = (struct dw_spi *)slave;
  692. struct chip_data *chip;
  693. /* Only alloc on first setup */
  694. chip = dws->chip;
  695. if (!chip) {
  696. chip = pvPortMalloc(sizeof(struct chip_data));
  697. if (!chip)
  698. return -ENOMEM;
  699. memset(chip, 0, sizeof(struct chip_data));
  700. dws->chip = chip;
  701. }
  702. dws->slave.mode = configuration->mode;
  703. chip->clk_div = (DIV_ROUND_UP(dws->max_freq, configuration->max_hz) + 1) & 0xfffe;
  704. chip->qspi_clk_div = (DIV_ROUND_UP(dws->max_freq, configuration->qspi_max_hz) + 1) & 0xfffe;
  705. dws->current_freq = dws->max_freq / chip->clk_div;
  706. dws->current_qspi_freq = dws->max_freq / chip->qspi_clk_div;
  707. printf("spi max_freq %u, current freq %u, qspi_freq %u.\n", dws->max_freq,
  708. dws->current_freq, dws->current_qspi_freq);
  709. gpio_direction_output(dws->cs_gpio,
  710. !(dws->slave.mode & SPI_CS_HIGH));
  711. return 0;
  712. }
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
spi_reset_chip(dws);
/*
 * Try to detect the FIFO depth if not set by interface driver,
 * the depth could be from 2 to 256 from HW spec
 */
if (!dws->fifo_len) {
u32 fifo;
/* Walk the TX threshold up until the write no longer sticks:
 * the last accepted value is the FIFO depth. */
for (fifo = 1; fifo < 256; fifo++) {
dw_writel(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
break;
}
dw_writel(dws, DW_SPI_TXFLTR, 0);
dws->fifo_len = (fifo == 1) ? 0 : fifo;
/* NOTE(review): 'dev' is not defined in this file — dev_dbg is
 * presumably a no-op or global-logging macro; confirm. */
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
}
/* One-time controller bring-up: hook the IRQ, init the HW, and
 * optionally initialize the DMA backend. Returns 0 or a negative errno. */
static int dw_spi_add_host(struct dw_spi *dws)
{
int ret;
BUG_ON(dws == NULL);
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)dws->regs + DW_SPI_DR;
ret = request_irq(dws->irq, 0, dw_spi_irq, dws);
if (ret < 0) {
dev_err(dev, "can not get IRQ\n");
goto err_exit;
}
/* Basic HW init */
spi_hw_init(dws);
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
if (ret) {
dev_warn(dev, "DMA init failed\n");
dws->dma_inited = 0;
/* NOTE(review): dma_mapped is set to 1 on DMA-init *failure*,
 * which looks inverted — confirm intent. dw_spi_transfer_one
 * forces it back to 0 per transfer, so PIO still works. */
dws->dma_mapped = 1;
}
}
return 0;
err_exit:
return ret;
}
  759. /* static void dw_spi_remove_host(struct dw_spi *dws)
  760. {
  761. if (dws->dma_ops && dws->dma_ops->dma_exit)
  762. dws->dma_ops->dma_exit(dws);
  763. spi_shutdown_chip(dws);
  764. free_irq(dws->irq);
  765. } */
  766. static void dwspi_jedec252_reset(const dw_spi_obj_t *dw_spi_obj)
  767. {
  768. int i;
  769. int si = 0;
  770. if ((dw_spi_obj->cs_gpio < 0) || (dw_spi_obj->io0_gpio < 0))
  771. return;
  772. gpio_direction_output(dw_spi_obj->cs_gpio, 1);
  773. gpio_direction_output(dw_spi_obj->io0_gpio, 1);
  774. udelay(300);
  775. for (i = 0; i < 4; i++) {
  776. gpio_direction_output(dw_spi_obj->cs_gpio, 0);
  777. gpio_direction_output(dw_spi_obj->io0_gpio, si);
  778. si = !si;
  779. udelay(300);
  780. gpio_direction_output(dw_spi_obj->cs_gpio, 1);
  781. udelay(300);
  782. }
  783. }
/* DMA completion callback (interrupt context): wake the waiting task. */
static void dw_spi_dma_complete_callback(void *param, unsigned int mask)
{
struct dw_spi *dws = param;
xQueueSendFromISR(dws->xfer_done, NULL, 0);
}
/* Acquire the RX DMA channel used for QSPI reads. Returns 0 or -1. */
static int dw_spi_dma_init(struct dw_spi *dws)
{
/* NOTE(review): the channel id is hard-coded to SPI0_RX_DMA_CH even for
 * the SPI2 instance — confirm whether this constant is instance-agnostic
 * or should derive from dws->dma_rx_id. */
dws->rxchan = dma_request_channel(SPI0_RX_DMA_CH);
if (!dws->rxchan) {
printf("dwspi request dma channel fail.\n");
return -1;
}
return 0;
}
/* Program the RX DMA watermark and enable RX DMA requests; completion is
 * signalled by the DMA callback, only error interrupts stay unmasked. */
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_message *message)
{
/* One DMA burst per quarter-FIFO of received frames. */
dws->rxlevel = dws->fifo_len / 4;
dw_writel(dws, DW_SPI_DMARDLR, dws->rxlevel - 1);
dw_writel(dws, DW_SPI_DMACR, SPI_DMA_RDMAE);
/* Set the interrupt mask */
spi_umask_intr(dws, SPI_INT_RXUI | SPI_INT_RXOI);
dws->transfer_handler = dma_transfer;
return 0;
}
  808. static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_message *message)
  809. {
  810. struct dma_config cfg = {0};
  811. int ret;
  812. /* Set external dma config: burst size, burst width */
  813. cfg.dst_addr_width = dws->dma_width;
  814. cfg.src_addr_width = dws->dma_width;
  815. /* Match burst msize with external dma config */
  816. cfg.dst_maxburst = dws->rxlevel;
  817. cfg.src_maxburst = dws->rxlevel;
  818. cfg.transfer_size = dws->len;
  819. cfg.direction = DMA_DEV_TO_MEM;
  820. cfg.src_addr = (unsigned int)(dws->regs) + DW_SPI_DR;
  821. //if (((u32)dws->rx/* | dws->len*/) & (ARCH_DMA_MINALIGN - 1)) {
  822. if ((u32)dws->rx & (ARCH_DMA_MINALIGN - 1)) {
  823. dws->rx_dummy_buffer = pvPortMalloc(dws->len);
  824. if (!dws->rx_dummy_buffer)
  825. return -ENOMEM;
  826. cfg.dst_addr = (u32)dws->rx_dummy_buffer;
  827. } else {
  828. cfg.dst_addr = (u32)dws->rx;
  829. }
  830. /* Invalidate cache before read */
  831. CP15_flush_dcache_for_dma(cfg.dst_addr,
  832. cfg.dst_addr + dws->len);
  833. cfg.src_id = dws->dma_rx_id;
  834. /* Read/write dst/src AHB select master0 */
  835. cfg.dst_master_id = 0;
  836. cfg.src_master_id = 0;
  837. ret = dma_config_channel(dws->rxchan, &cfg);
  838. if (ret) {
  839. printf("dwspi failed to config dma.\n");
  840. return -EBUSY;
  841. }
  842. /* Set dw_spi_dma_complete_callback as callback */
  843. dma_register_complete_callback(dws->rxchan, dw_spi_dma_complete_callback, dws);
  844. dma_start_channel(dws->rxchan);
  845. return 0;
  846. }
/* DMA backend: only the RX (read) path is implemented;
 * dma_exit/can_dma/dma_stop are intentionally left NULL. */
static const struct dw_spi_dma_ops dw_dma_ops = {
.dma_init = dw_spi_dma_init,
.dma_setup = dw_spi_dma_setup,
.dma_transfer = dw_spi_dma_transfer,
};
  852. static int dw_spi_probe(const dw_spi_obj_t *dw_spi_obj)
  853. {
  854. struct dw_spi *dws;
  855. int ret;
  856. dwspi_jedec252_reset(dw_spi_obj);
  857. pinctrl_set_group(dw_spi_obj->gpio_mux_grpid);
  858. dws = pvPortMalloc(sizeof(struct dw_spi));
  859. if (!dws)
  860. return -ENOMEM;
  861. memset(dws, 0, sizeof(struct dw_spi));
  862. dws->xfer_done = xQueueCreate(1, 0);
  863. dws->regs = (void __iomem *)dw_spi_obj->base;
  864. dws->irq = dw_spi_obj->irqn;
  865. dws->dma_rx_id = dw_spi_obj->dma_rx_id;
  866. dws->dma_tx_id = dw_spi_obj->dma_tx_id;
  867. dws->bus_num = 0;
  868. dws->max_freq = ulClkGetRate(dw_spi_obj->clkid);
  869. vClkEnable(dw_spi_obj->clkid);
  870. dws->num_cs = 1;
  871. dws->cs_gpio = dw_spi_obj->cs_gpio;
  872. dws->slave.mode = dw_spi_obj->mode;
  873. dws->slave.cs = 0;
  874. dws->slave.xfer = dw_spi_transfer_one;
  875. dws->slave.qspi_read = dw_qspi_read;
  876. dws->slave.configure = dw_spi_setup;
  877. dws->dma_ops = &dw_dma_ops;
  878. ret = dw_spi_add_host(dws);
  879. if (ret)
  880. goto out;
  881. strncpy(dws->slave.name, dw_spi_obj->driver_name, 16);
  882. spi_add_slave(&dws->slave);
  883. return 0;
  884. out:
  885. return ret;
  886. }
/* Register every compile-time enabled DW SPI controller instance.
 * Always returns 0; per-instance probe failures are not propagated. */
int dwspi_init(void)
{
#ifdef DW_SPI0_SUPPORT
dw_spi_probe(&dw_spi0_obj);
#endif
#ifdef DW_SPI2_SUPPORT
dw_spi_probe(&dw_spi2_obj);
#endif
return 0;
}