/* spi-arke.c - Arkmicro DesignWare (dw) SSI/SPI controller driver */
  1. #define DEBUG
  2. #include <linux/clk.h>
  3. #include <linux/err.h>
  4. #include <linux/interrupt.h>
  5. #include <linux/platform_device.h>
  6. #include <linux/slab.h>
  7. #include <linux/spi/spi.h>
  8. #include <linux/scatterlist.h>
  9. #include <linux/module.h>
  10. #include <linux/of.h>
  11. #include <linux/of_gpio.h>
  12. #include <linux/of_platform.h>
  13. #include <linux/property.h>
  14. #include <linux/io.h>
  15. #include <linux/scatterlist.h>
  16. #include <linux/gpio.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/highmem.h>
  19. #include <linux/delay.h>
#define DRIVER_NAME "ark_dw_spi"

/* Register offsets (DesignWare SSI register map, relative to dws->regs) */
#define DW_SPI_CTRL0 0x00
#define DW_SPI_CTRL1 0x04
#define DW_SPI_SSIENR 0x08
#define DW_SPI_MWCR 0x0c
#define DW_SPI_SER 0x10
#define DW_SPI_BAUDR 0x14
#define DW_SPI_TXFLTR 0x18
#define DW_SPI_RXFLTR 0x1c
#define DW_SPI_TXFLR 0x20
#define DW_SPI_RXFLR 0x24
#define DW_SPI_SR 0x28
#define DW_SPI_IMR 0x2c
#define DW_SPI_ISR 0x30
#define DW_SPI_RISR 0x34
#define DW_SPI_TXOICR 0x38
#define DW_SPI_RXOICR 0x3c
#define DW_SPI_RXUICR 0x40
#define DW_SPI_MSTICR 0x44
#define DW_SPI_ICR 0x48
#define DW_SPI_DMACR 0x4c
#define DW_SPI_DMATDLR 0x50
#define DW_SPI_DMARDLR 0x54
#define DW_SPI_IDR 0x58
#define DW_SPI_VERSION 0x5c
#define DW_SPI_DR 0x60

/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 16 /* data frame size field (value = bits - 1) */
#define SPI_FRF_OFFSET 4 /* frame format field */
#define SPI_FRF_SPI 0x0
#define SPI_FRF_SSP 0x1
#define SPI_FRF_MICROWIRE 0x2
#define SPI_FRF_RESV 0x3
/* MODE is the 2-bit SCPH/SCPOL pair; the two offsets below address it */
#define SPI_MODE_OFFSET 6
#define SPI_SCPH_OFFSET 6
#define SPI_SCOL_OFFSET 7
#define SPI_TMOD_OFFSET 8 /* transfer mode field */
#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
#define SPI_SLVOE_OFFSET 10
#define SPI_SRL_OFFSET 11
#define SPI_CFS_OFFSET 12

/* Bit fields in SR, 7 bits */
#define SR_MASK 0x7f /* cover 7 bits */
#define SR_BUSY (1 << 0)
#define SR_TF_NOT_FULL (1 << 1)
#define SR_TF_EMPT (1 << 2)
#define SR_RF_NOT_EMPT (1 << 3)
#define SR_RF_FULL (1 << 4)
#define SR_TX_ERR (1 << 5)
#define SR_DCOL (1 << 6)

/* Bit fields in ISR, IMR, RISR, 7 bits */
#define SPI_INT_TXEI (1 << 0)
#define SPI_INT_TXOI (1 << 1)
#define SPI_INT_RXUI (1 << 2)
#define SPI_INT_RXOI (1 << 3)
#define SPI_INT_RXFI (1 << 4)
#define SPI_INT_MSTI (1 << 5)

/* Bit fields in DMACR */
#define SPI_DMA_RDMAE (1 << 0)
#define SPI_DMA_TDMAE (1 << 1)

/* TX RX interrupt level threshold, max can be 256 */
#define SPI_INT_THRESHOLD 32
/*
 * Frame formats supported by the DesignWare SSI core; values match the
 * CTRLR0 FRF field encodings (SPI_FRF_SPI/SSP/MICROWIRE above).
 */
enum dw_ssi_type {
	SSI_MOTO_SPI = 0,	/* Motorola SPI */
	SSI_TI_SSP,		/* TI synchronous serial protocol */
	SSI_NS_MICROWIRE,	/* National Semiconductor Microwire */
};
struct dw_spi;

/*
 * Hooks an optional DMA back-end provides. dma_init/dma_exit bracket the
 * host's lifetime; dma_setup/dma_transfer/dma_stop bracket one transfer;
 * can_dma is handed to the SPI core to decide per-transfer DMA use.
 */
struct dw_spi_dma_ops {
	int (*dma_init)(struct dw_spi *dws);
	void (*dma_exit)(struct dw_spi *dws);
	int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer);
	bool (*can_dma)(struct spi_master *master, struct spi_device *spi,
			struct spi_transfer *xfer);
	int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer);
	void (*dma_stop)(struct dw_spi *dws);
};
/* Per-controller state shared by the PIO, IRQ and DMA transfer paths. */
struct dw_spi {
	struct spi_master *master;
	enum dw_ssi_type type;

	void __iomem *regs;	/* mapped register base */
	struct clk *clk;
	unsigned long paddr;	/* physical base; used to derive dma_addr */
	int irq;
	u32 fifo_len;	/* depth of the FIFO buffer */
	u32 max_freq;	/* max bus freq supported */
	u32 reg_io_width;	/* DR I/O width in bytes */
	u16 bus_num;
	u16 num_cs;	/* supported slave numbers */

	/* Current message transfer state info */
	size_t len;	/* transfer length in bytes */
	void *tx;	/* TX cursor; the original tx_buf may be NULL (rx-only) */
	void *tx_end;	/* tx + len, advanced even for a NULL tx_buf */
	void *rx;	/* RX cursor; the original rx_buf may be NULL (tx-only) */
	void *rx_end;	/* rx + len */
	int dma_mapped;
	u8 n_bytes;	/* bytes per frame of the current transfer: 1, 2 or 4 */
	u32 dma_width;
	irqreturn_t (*transfer_handler)(struct dw_spi *dws);
	u32 current_freq;	/* frequency in hz */

	/* DMA info */
	int dma_inited;
	struct dma_chan *txchan;
	struct dma_chan *rxchan;
	unsigned long dma_chan_busy;
	dma_addr_t dma_addr; /* phy address of the Data register */
	const struct dw_spi_dma_ops *dma_ops;
	void *dma_tx;
	void *dma_rx;

	/* Bus interface info */
	void *priv;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs;
#endif
};
/* Slave spi_dev related state, stored via spi_set_ctldata() in setup(). */
struct chip_data {
	u8 cs;		/* chip select pin */
	u8 tmode;	/* TR/TO/RO/EEPROM */
	u8 type;	/* SPI/SSP/MicroWire */
	u8 poll_mode;	/* 1 means use poll mode */
	u8 enable_dma;
	u16 clk_div;	/* baud rate divider */
	u32 speed_hz;	/* baud rate */
	void (*cs_control)(u32 command);	/* optional board-specific CS hook */
};
/*
 * Each SPI slave device working with the dw_api controller should
 * have such a structure claiming its working mode (poll or PIO/DMA),
 * which can be saved in the "controller_data" member of the
 * struct spi_device.
 */
struct dw_spi_chip {
	u8 poll_mode;	/* 1 for controller polling mode */
	u8 type;	/* SPI/SSP/MicroWire */
	void (*cs_control)(u32 command);
};
/* Raw 32-bit register read (no byte-swap/barriers, native-endian MMIO). */
static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
{
	return __raw_readl(dws->regs + offset);
}

/* Raw 16-bit register read. */
static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
{
	return __raw_readw(dws->regs + offset);
}

/* Raw 32-bit register write. */
static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
{
	__raw_writel(val, dws->regs + offset);
}

/* Raw 16-bit register write. */
static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
{
	__raw_writew(val, dws->regs + offset);
}
  178. static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
  179. {
  180. switch (dws->reg_io_width) {
  181. case 2:
  182. return dw_readw(dws, offset);
  183. case 4:
  184. default:
  185. return dw_readl(dws, offset);
  186. }
  187. }
  188. static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
  189. {
  190. switch (dws->reg_io_width) {
  191. case 2:
  192. dw_writew(dws, offset, val);
  193. break;
  194. case 4:
  195. default:
  196. dw_writel(dws, offset, val);
  197. break;
  198. }
  199. }
/* Gate the controller on/off via SSIENR; disabling also clears the FIFOs. */
static inline void spi_enable_chip(struct dw_spi *dws, int enable)
{
	dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
}

/* Program the baud-rate divider register (0 disables the serial clock). */
static inline void spi_set_clk(struct dw_spi *dws, u16 div)
{
	dw_writel(dws, DW_SPI_BAUDR, div);
}
  208. /* Disable IRQ bits */
  209. static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
  210. {
  211. u32 new_mask;
  212. new_mask = dw_readl(dws, DW_SPI_IMR) & ~mask;
  213. dw_writel(dws, DW_SPI_IMR, new_mask);
  214. }
  215. /* Enable IRQ bits */
  216. static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
  217. {
  218. u32 new_mask;
  219. new_mask = dw_readl(dws, DW_SPI_IMR) | mask;
  220. dw_writel(dws, DW_SPI_IMR, new_mask);
  221. }
/*
 * Disable the SPI controller, mask all interrupts, and re-enable the
 * controller. The transmit and receive FIFO buffers are cleared while
 * the device is disabled.
 */
static inline void spi_reset_chip(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	spi_enable_chip(dws, 1);
}

/* Quiesce the controller: disable it and zero the baud-rate divider. */
static inline void spi_shutdown_chip(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_set_clk(dws, 0);
}
/*
 * SPI core set_cs callback. The core's polarity is inverted relative to
 * "active": enable == false means assert this chip select. Asserting
 * writes the slave-enable register so the controller drives the line.
 */
static void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_master_get_devdata(spi->master);
	struct chip_data *chip = spi_get_ctldata(spi);

	/* Chip select logic is inverted from spi_set_cs() */
	if (chip && chip->cs_control)
		chip->cs_control(!enable);

	if (!enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
}
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;	/* frames still to send */
	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);	/* free TX FIFO slots */

	/*
	 * Another concern is about the tx/rx mismatch, we
	 * thought to use (dws->fifo_len - rxflr - txflr) as
	 * one maximum value for tx, but it doesn't cover the
	 * data which is out of tx/rx fifo and inside the
	 * shift registers. So a control from sw point of
	 * view is taken.
	 */
	rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}
  266. /* Return the max entries we should read out of rx fifo */
  267. static inline u32 rx_max(struct dw_spi *dws)
  268. {
  269. u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
  270. return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
  271. }
/* Push as many frames into the TX FIFO as tx_max() allows. */
static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u32 txw = 0;

	while (max--) {
		/*
		 * Set the tx word if the transfer's original "tx" is not null.
		 * tx_end was set to tx_buf + len, so (tx_end - len) recovers
		 * the original buffer pointer; for rx-only transfers it is
		 * NULL and we clock out zeros instead.
		 */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else if (dws->n_bytes == 2)
				txw = *(u16 *)(dws->tx);
			else
				txw = *(u32 *)(dws->tx);
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		/* Advance the cursor even for a NULL tx_buf to track progress. */
		dws->tx += dws->n_bytes;
	}
}
/* Drain as many frames from the RX FIFO as rx_max() reports available. */
static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u32 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		/*
		 * Care rx only if the transfer's original "rx" is not null;
		 * (rx_end - len) recovers the original buffer pointer (see
		 * dw_writer). For tx-only transfers the data is discarded.
		 */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else if (dws->n_bytes == 2)
				*(u16 *)(dws->rx) = rxw;
			else
				*(u32 *)(dws->rx) = rxw;
		}
		/* Advance the cursor even for a NULL rx_buf to track progress. */
		dws->rx += dws->n_bytes;
	}
}
/* Abort the in-flight message: reset the chip and report -EIO to the core. */
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_reset_chip(dws);
	dev_err(&dws->master->dev, "%s\n", msg);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
}
/*
 * IRQ-mode transfer handler: drain RX, refill TX on TX-empty, and finish
 * the transfer once all expected RX frames have been read.
 */
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readl(dws, DW_SPI_ICR);	/* reading ICR clears the interrupts */
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	/* RX cursor reached the end: the whole transfer is complete. */
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}
	/* Mask TXEI while refilling so we don't re-enter mid-fill. */
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Enable TX irq always, it will be disabled when RX finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}
/*
 * Top-level (shared) IRQ handler: returns IRQ_NONE for spurious
 * interrupts, otherwise dispatches to the handler installed by
 * dw_spi_transfer_one().
 */
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct dw_spi *dws = spi_master_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	/* Interrupt fired with no message in flight: just quiesce TXEI. */
	if (!master->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}
  351. /* Must be called inside pump_transfers() */
  352. static int poll_transfer(struct dw_spi *dws)
  353. {
  354. do {
  355. dw_writer(dws);
  356. dw_reader(dws);
  357. cpu_relax();
  358. } while (dws->rx_end > dws->rx);
  359. return 0;
  360. }
  361. static int dw_spi_transfer_one(struct spi_master *master,
  362. struct spi_device *spi, struct spi_transfer *transfer)
  363. {
  364. struct dw_spi *dws = spi_master_get_devdata(master);
  365. struct chip_data *chip = spi_get_ctldata(spi);
  366. u8 imask = 0;
  367. u16 txlevel = 0;
  368. u32 cr0;
  369. u32 bits_per_word = 0;
  370. int ret;
  371. dws->dma_mapped = 0;
  372. dws->tx = (void *)transfer->tx_buf;
  373. dws->tx_end = dws->tx + transfer->len;
  374. dws->rx = transfer->rx_buf;
  375. dws->rx_end = dws->rx + transfer->len;
  376. dws->len = transfer->len;
  377. spi_enable_chip(dws, 0);
  378. /* Handle per transfer options for bpw and speed */
  379. if (transfer->speed_hz != dws->current_freq) {
  380. if (transfer->speed_hz != chip->speed_hz) {
  381. /* clk_div doesn't support odd number */
  382. chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
  383. chip->speed_hz = transfer->speed_hz;
  384. }
  385. dws->current_freq = transfer->speed_hz;
  386. spi_set_clk(dws, chip->clk_div);
  387. }
  388. if (transfer->len & 1)
  389. bits_per_word = 8;
  390. else if (transfer->len & 3)
  391. bits_per_word = 16;
  392. else
  393. bits_per_word = 32;
  394. //printk("len=%d, bits_per_word=%d.\n", transfer->len, bits_per_word);
  395. if (bits_per_word == 8) {
  396. dws->n_bytes = 1;
  397. dws->dma_width = 1;
  398. } else if (bits_per_word == 16) {
  399. dws->n_bytes = 2;
  400. dws->dma_width = 2;
  401. } else if (bits_per_word == 32) {
  402. dws->n_bytes = 4;
  403. dws->dma_width = 4;
  404. } else {
  405. return -EINVAL;
  406. }
  407. /* Default SPI mode is SCPOL = 0, SCPH = 0 */
  408. cr0 = ((bits_per_word - 1) << SPI_DFS_OFFSET)
  409. | (chip->type << SPI_FRF_OFFSET)
  410. | (spi->mode << SPI_MODE_OFFSET)
  411. | (chip->tmode << SPI_TMOD_OFFSET);
  412. /*
  413. * Adjust transfer mode if necessary. Requires platform dependent
  414. * chipselect mechanism.
  415. */
  416. if (chip->cs_control) {
  417. if (dws->rx && dws->tx)
  418. chip->tmode = SPI_TMOD_TR;
  419. else if (dws->rx)
  420. chip->tmode = SPI_TMOD_RO;
  421. else
  422. chip->tmode = SPI_TMOD_TO;
  423. cr0 &= ~SPI_TMOD_MASK;
  424. cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
  425. }
  426. dw_writel(dws, DW_SPI_CTRL0, cr0);
  427. /* Check if current transfer is a DMA transaction */
  428. if (master->can_dma && master->can_dma(master, spi, transfer))
  429. dws->dma_mapped = master->cur_msg_mapped;
  430. /* For poll mode just disable all interrupts */
  431. spi_mask_intr(dws, 0xff);
  432. /*
  433. * Interrupt mode
  434. * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
  435. */
  436. if (dws->dma_mapped) {
  437. ret = dws->dma_ops->dma_setup(dws, transfer);
  438. if (ret < 0) {
  439. spi_enable_chip(dws, 1);
  440. return ret;
  441. }
  442. } else if (!chip->poll_mode) {
  443. txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
  444. dw_writel(dws, DW_SPI_TXFLTR, txlevel);
  445. /* Set the interrupt mask */
  446. imask |= SPI_INT_TXEI | SPI_INT_TXOI |
  447. SPI_INT_RXUI | SPI_INT_RXOI;
  448. spi_umask_intr(dws, imask);
  449. dws->transfer_handler = interrupt_transfer;
  450. }
  451. spi_enable_chip(dws, 1);
  452. if (dws->dma_mapped) {
  453. ret = dws->dma_ops->dma_transfer(dws, transfer);
  454. if (ret < 0)
  455. return ret;
  456. }
  457. if (chip->poll_mode) {
  458. return poll_transfer(dws);
  459. }
  460. return 1;
  461. }
/*
 * SPI core handle_err callback: stop any in-flight DMA and reset the
 * controller so the next message starts from a clean state.
 */
static void dw_spi_handle_err(struct spi_master *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	int ret;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;
		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;
	}

	/* Default to full-duplex; transfer_one() adjusts tmode per transfer. */
	chip->tmode = SPI_TMOD_TR;

	/* Park any GPIO chip select at its inactive level. */
	if (gpio_is_valid(spi->cs_gpio)) {
		ret = gpio_direction_output(spi->cs_gpio,
				!(spi->mode & SPI_CS_HIGH));
		if (ret)
			return ret;
	}

	return 0;
}
  505. static void dw_spi_cleanup(struct spi_device *spi)
  506. {
  507. struct chip_data *chip = spi_get_ctldata(spi);
  508. kfree(chip);
  509. spi_set_ctldata(spi, NULL);
  510. }
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		/*
		 * TXFLTR only holds values below the FIFO depth: write
		 * increasing thresholds until one fails to read back.
		 */
		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFLTR, 0);

		/* fifo == 1 means even the first write failed: no FIFO found. */
		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}
}
/*
 * Allocate and register the SPI master for this controller. The caller
 * must have filled in dws->paddr, regs, irq, bus_num, num_cs, max_freq
 * and (optionally) dma_ops. Returns 0 or a negative error code.
 */
static int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_master *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->dma_inited = 0;
	/* Physical address of the data register, for DMA back-ends. */
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			master);
	if (ret < 0) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH | SPI_NO_CS;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->flags = SPI_MASTER_GPIO_SS;

	/* Basic HW init */
	spi_hw_init(dev, dws);

	/* DMA is optional: fall back to PIO if the back-end fails to init. */
	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
			dws->dma_inited = 0;
		} else {
			master->can_dma = dws->dma_ops->can_dma;
		}
	}

	spi_master_set_devdata(master, dws);
	ret = devm_spi_register_master(dev, master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	return 0;

	/* Error paths fall through: dma exit -> free irq -> put master. */
err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_master_put(master);
	return ret;
}
/*
 * Undo dw_spi_add_host(): tear down DMA, quiesce the controller and
 * release the IRQ (the master itself was devm-registered).
 */
static void dw_spi_remove_host(struct dw_spi *dws)
{
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
  595. /* static int dw_spi_suspend_host(struct dw_spi *dws)
  596. {
  597. int ret;
  598. ret = spi_master_suspend(dws->master);
  599. if (ret)
  600. return ret;
  601. spi_shutdown_chip(dws);
  602. return 0;
  603. }
  604. static int dw_spi_resume_host(struct dw_spi *dws)
  605. {
  606. int ret;
  607. spi_hw_init(&dws->master->dev, dws);
  608. ret = spi_master_resume(dws->master);
  609. if (ret)
  610. dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
  611. return ret;
  612. } */
/*
 * Platform probe: map registers, get the IRQ and clock, read optional DT
 * properties (reg-io-width, num-cs, cs-gpios), then register the host.
 * The clock is only enabled after all resource lookups that cannot fail
 * with it on, so early returns need no unwinding; later failures go
 * through "out" to disable it.
 */
static int ark_dw_spi_probe(struct platform_device *pdev)
{
	struct dw_spi *dws;
	struct resource *mem;
	int ret;
	int num_cs;

	dws = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi),
			GFP_KERNEL);
	if (!dws)
		return -ENOMEM;

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dws->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dws->regs)) {
		dev_err(&pdev->dev, "SPI region map failed\n");
		return PTR_ERR(dws->regs);
	}

	dws->irq = platform_get_irq(pdev, 0);
	if (dws->irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return dws->irq; /* -ENXIO */
	}

	dws->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dws->clk))
		return PTR_ERR(dws->clk);
	ret = clk_prepare_enable(dws->clk);
	if (ret)
		return ret;

	dws->bus_num = pdev->id;
	dws->max_freq = clk_get_rate(dws->clk);

	/* reg-io-width is optional; unset (0) falls back to 32-bit DR access. */
	device_property_read_u32(&pdev->dev, "reg-io-width", &dws->reg_io_width);

	num_cs = 4;	/* default when the "num-cs" property is absent */
	device_property_read_u32(&pdev->dev, "num-cs", &num_cs);
	dws->num_cs = num_cs;

	/* Claim any GPIO chip selects listed in the device tree. */
	if (pdev->dev.of_node) {
		int i;

		for (i = 0; i < dws->num_cs; i++) {
			int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
					"cs-gpios", i);

			if (cs_gpio == -EPROBE_DEFER) {
				ret = cs_gpio;
				goto out;
			}

			if (gpio_is_valid(cs_gpio)) {
				ret = devm_gpio_request(&pdev->dev, cs_gpio,
						dev_name(&pdev->dev));
				if (ret)
					goto out;
			}
		}
	}

	ret = dw_spi_add_host(&pdev->dev, dws);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, dws);

	return 0;

out:
	clk_disable_unprepare(dws->clk);
	return ret;
}
/* Platform remove: tear down the host, then gate the controller clock. */
static int ark_dw_spi_remove(struct platform_device *pdev)
{
	struct dw_spi *dws = platform_get_drvdata(pdev);

	dw_spi_remove_host(dws);
	clk_disable_unprepare(dws->clk);

	return 0;
}
/* Device-tree match table; bound via the "arkmicro,ark-dw-ssi" compatible. */
static const struct of_device_id ark_dw_spi_of_match[] = {
	{ .compatible = "arkmicro,ark-dw-ssi", },
	{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, ark_dw_spi_of_match);

static struct platform_driver ark_dw_spi_driver = {
	.probe = ark_dw_spi_probe,
	.remove = ark_dw_spi_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = ark_dw_spi_of_match,
	},
};
module_platform_driver(ark_dw_spi_driver);

MODULE_AUTHOR("Sim");
MODULE_DESCRIPTION("Arkmicro new dw spi controller driver");
MODULE_LICENSE("GPL v2");