spi-ark.c

/*
 * Arkmicro SPI controller driver
 *
 * Licensed under GPLv2 or later.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/scatterlist.h>
#define DRIVER_NAME "spi_ark"

#define USE_DMA_THRESHOLD 32

#define ARK_ECSPI_RXDATA 0x50
#define ARK_ECSPI_TXDATA 0x460

/* generic defines to abstract from the different register layouts */
#define ARK_INT_RR (1 << 0)	/* Receive data ready interrupt */
#define ARK_INT_TE (1 << 1)	/* Transmit FIFO empty interrupt */

/* The maximum number of bytes that an SDMA BD can transfer. */
#define MAX_SDMA_BD_BYTES (1 << 15)

#define ARK_ECSPI_CTRL_MAX_BURST 512

struct spi_ark_data;

struct spi_ark_devtype_data {
	void (*intctrl)(struct spi_ark_data *, int);
	int (*config)(struct spi_device *);
	void (*trigger)(struct spi_ark_data *);
	int (*rx_available)(struct spi_ark_data *);
	void (*reset)(struct spi_ark_data *);
	bool has_dmamode;
	unsigned int fifo_size;
};
struct spi_ark_data {
	struct spi_bitbang bitbang;
	struct device *dev;
	struct completion xfer_done;
	void __iomem *base;
	unsigned long base_phys;
	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;
	unsigned int spi_bus_clk;
	unsigned int speed_hz;
	unsigned int bits_per_word;
	unsigned int spi_drctl;
	unsigned int count, remainder;
	void (*tx)(struct spi_ark_data *);
	void (*rx)(struct spi_ark_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int read_u32;
	unsigned int word_mask;
	bool is_arke;

	/* DMA */
	bool usedma;
	u32 wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;
	struct sg_table *dma_rx_sg;
	struct spi_transfer dma_transfer;
	struct spi_transfer pio_transfer;

	const struct spi_ark_devtype_data *devtype_data;
};
static void spi_ark_buf_rx_u8(struct spi_ark_data *spi_ark)
{
	unsigned int val = readl(spi_ark->base + ARK_ECSPI_RXDATA);

	if (spi_ark->rx_buf) {
		if (spi_ark->is_arke)
			*(u8 *)spi_ark->rx_buf = val & 0xff;
		else
			*(u8 *)spi_ark->rx_buf = (val >> 24) & 0xff;
		spi_ark->rx_buf += 1;
	}
}

static void spi_ark_buf_rx_u16(struct spi_ark_data *spi_ark)
{
	unsigned int val = readl(spi_ark->base + ARK_ECSPI_RXDATA);

	if (spi_ark->rx_buf) {
		if (spi_ark->is_arke)
			*(u16 *)spi_ark->rx_buf = val & 0xffff;
		else
			*(u16 *)spi_ark->rx_buf = (val >> 16) & 0xffff;
		spi_ark->rx_buf += 2;
	}
}

static void spi_ark_buf_tx_u8(struct spi_ark_data *spi_ark)
{
	u32 val = 0;

	if (spi_ark->tx_buf) {
		if (spi_ark->is_arke)
			val = *(u8 *)spi_ark->tx_buf;
		else
			val = *(u8 *)spi_ark->tx_buf << 24;
		spi_ark->tx_buf += 1;
	}

	spi_ark->count -= 1;
	writel(val, spi_ark->base + ARK_ECSPI_TXDATA);
}

static void spi_ark_buf_tx_u16(struct spi_ark_data *spi_ark)
{
	u32 val = 0;

	if (spi_ark->tx_buf) {
		if (spi_ark->is_arke)
			val = *(u16 *)spi_ark->tx_buf;
		else
			val = *(u16 *)spi_ark->tx_buf << 16;
		spi_ark->tx_buf += 2;
	}

	spi_ark->count -= 2;
	writel(val, spi_ark->base + ARK_ECSPI_TXDATA);
}

static int spi_ark_bytes_per_word(const int bits_per_word)
{
	return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
}
static bool spi_ark_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_ark_data *spi_ark = spi_master_get_devdata(master);
	const u32 mszs[] = {1, 4, 8, 16};
	int idx = ARRAY_SIZE(mszs) - 1;
	struct sg_table *tx;
	struct sg_table *rx;
	struct spi_transfer *dma_xfer = &spi_ark->dma_transfer;
	struct spi_transfer *pio_xfer = &spi_ark->pio_transfer;
	int len, remainder;

	if (!master->dma_rx)
		return false;

	pio_xfer->len = 0;
	memcpy(dma_xfer, transfer, sizeof(struct spi_transfer));
	tx = &dma_xfer->tx_sg;
	rx = &dma_xfer->rx_sg;

	remainder = transfer->len & 3;
	len = transfer->len - remainder;
	if (len < USE_DMA_THRESHOLD)
		return false;

	if (remainder) {
		if (tx->nents && tx->sgl[tx->nents - 1].length > remainder &&
		    rx->nents && rx->sgl[rx->nents - 1].length > remainder) {
			tx->sgl[tx->nents - 1].length -= remainder;
			rx->sgl[rx->nents - 1].length -= remainder;
			dma_xfer->len = len;

			memcpy(pio_xfer, transfer, sizeof(struct spi_transfer));
			pio_xfer->len = remainder;
			if (pio_xfer->tx_buf)
				pio_xfer->tx_buf += len;
			if (pio_xfer->rx_buf)
				pio_xfer->rx_buf += len;
		} else {
			return false;
		}
	}

	/* The DW DMA burst size should be 16, 8, 4 or 1 words. */
	for (; idx >= 0; idx--) {
		if (!(len % (mszs[idx] * 4)))
			break;
	}
	spi_ark->wml = mszs[idx];

	return true;
}
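
/*
 * Illustrative walk-through of the split logic above (the numbers are made
 * up for the example, not taken from real traffic): for transfer->len = 70
 * bytes, remainder = 70 & 3 = 2 and len = 68 >= USE_DMA_THRESHOLD, so the
 * last scatterlist entries of the TX and RX tables are shortened by 2 bytes,
 * dma_transfer covers the first 68 bytes and pio_transfer the trailing 2.
 * The burst loop then picks the largest of {16, 8, 4, 1} words that divides
 * len evenly: 68 % 64, 68 % 32 and 68 % 16 are all non-zero while
 * 68 % 4 == 0, so wml = 1. A 64-byte transfer would instead give wml = 16.
 */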
#define ARK_ECSPI_CTRL 0x08
#define ARK_ECSPI_CTRL_ENABLE (1 << 0)
#define ARK_ECSPI_CTRL_XCH (1 << 2)
#define ARK_ECSPI_CTRL_SMC (1 << 3)
#define ARK_ECSPI_CTRL_MODE_MASK (0xf << 4)
#define ARK_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
#define ARK_ECSPI_CTRL_POSTDIV_OFFSET 8
#define ARK_ECSPI_CTRL_PREDIV_OFFSET 12
#define ARK_ECSPI_CTRL_CS(cs) ((cs) << 18)
#define ARK_ECSPI_CTRL_BL_OFFSET 20
#define ARK_ECSPI_CTRL_BL_MASK (0xfff << 20)

#define ARK_ECSPI_CONFIG 0x0c
#define ARK_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
#define ARK_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
#define ARK_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
#define ARK_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
#define ARK_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))

#define ARK_ECSPI_INT 0x10
#define ARK_ECSPI_INT_TEEN (1 << 0)
#define ARK_ECSPI_INT_RREN (1 << 3)

#define ARK_ECSPI_DMA 0x14
#define ARK_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
#define ARK_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
#define ARK_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
#define ARK_ECSPI_DMA_TEDEN (1 << 7)
#define ARK_ECSPI_DMA_RXDEN (1 << 23)
#define ARK_ECSPI_DMA_RXTDEN (1 << 31)

#define ARK_ECSPI_STAT 0x18
#define ARK_ECSPI_STAT_REN (1 << 8)
#define ARK_ECSPI_STAT_RR (1 << 3)

#define ARK_ECSPI_TESTREG 0x20
#define ARK_ECSPI_TESTREG_LBC BIT(31)
static void spi_ark_buf_rx_swap_u32(struct spi_ark_data *spi_ark)
{
	unsigned int val = readl(spi_ark->base + ARK_ECSPI_RXDATA);

	if (spi_ark->rx_buf) {
		val &= spi_ark->word_mask;
		*(u32 *)spi_ark->rx_buf = val;
		spi_ark->rx_buf += sizeof(u32);
	}
}

static void spi_ark_buf_rx_swap(struct spi_ark_data *spi_ark)
{
	unsigned int bytes_per_word;

	bytes_per_word = spi_ark_bytes_per_word(spi_ark->bits_per_word);

	if (spi_ark->read_u32) {
		spi_ark_buf_rx_swap_u32(spi_ark);
		return;
	}

	if (bytes_per_word == 1)
		spi_ark_buf_rx_u8(spi_ark);
	else if (bytes_per_word == 2)
		spi_ark_buf_rx_u16(spi_ark);
}
static void spi_ark_buf_tx_swap_u32(struct spi_ark_data *spi_ark)
{
	u32 val = 0;

	if (spi_ark->tx_buf) {
		val = *(u32 *)spi_ark->tx_buf;
		val &= spi_ark->word_mask;
		spi_ark->tx_buf += sizeof(u32);
	}

	spi_ark->count -= sizeof(u32);
	writel(val, spi_ark->base + ARK_ECSPI_TXDATA);
}

static void spi_ark_buf_tx_swap(struct spi_ark_data *spi_ark)
{
	u32 ctrl, val;
	unsigned int bytes_per_word;

	if (spi_ark->count == spi_ark->remainder) {
		ctrl = readl(spi_ark->base + ARK_ECSPI_CTRL);
		ctrl &= ~ARK_ECSPI_CTRL_BL_MASK;
		if (spi_ark->count > ARK_ECSPI_CTRL_MAX_BURST) {
			spi_ark->remainder = spi_ark->count %
					     ARK_ECSPI_CTRL_MAX_BURST;
			val = ARK_ECSPI_CTRL_MAX_BURST * 8 - 1;
		} else if (spi_ark->count >= sizeof(u32)) {
			spi_ark->remainder = spi_ark->count % sizeof(u32);
			val = (spi_ark->count - spi_ark->remainder) * 8 - 1;
		} else {
			spi_ark->remainder = 0;
			val = spi_ark->bits_per_word - 1;
			spi_ark->read_u32 = 0;
		}
		ctrl |= (val << ARK_ECSPI_CTRL_BL_OFFSET);
		writel(ctrl, spi_ark->base + ARK_ECSPI_CTRL);
	}

	if (spi_ark->count >= sizeof(u32)) {
		spi_ark_buf_tx_swap_u32(spi_ark);
		return;
	}

	bytes_per_word = spi_ark_bytes_per_word(spi_ark->bits_per_word);
	if (bytes_per_word == 1)
		spi_ark_buf_tx_u8(spi_ark);
	else if (bytes_per_word == 2)
		spi_ark_buf_tx_u16(spi_ark);
}
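
/*
 * Illustrative example of the burst length reprogramming above (values are
 * made up): a 70-byte PIO transfer with 8 bits per word starts with
 * count == remainder == 70 (see spi_ark_setupxfer()), so the first call
 * programs a burst of (70 - 70 % 4) * 8 - 1 = 543 bits and leaves
 * remainder = 2. Whole 32-bit words are then pushed until count reaches 2,
 * at which point the burst length is reprogrammed to bits_per_word - 1 = 7,
 * read_u32 is cleared and the last two bytes go out one by one through
 * spi_ark_buf_tx_u8().
 */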
/* ARK eCSPI */
static unsigned int ark_ecspi_clkdiv(struct spi_ark_data *spi_ark,
				     unsigned int fspi, unsigned int *fres)
{
	/*
	 * There are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post.
	 */
	unsigned int pre, post;
	unsigned int fin = spi_ark->spi_clk;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_ark->dev, "cannot set clock freq: %u (base freq: %u)\n",
			fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_ark->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
		__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << ARK_ECSPI_CTRL_PREDIV_OFFSET) |
	       (post << ARK_ECSPI_CTRL_POSTDIV_OFFSET);
}
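
/*
 * Worked example for the divider calculation above (illustrative input
 * values): with fin = 60 MHz and a requested fspi = 1 MHz,
 * fls(60000000) - fls(1000000) = 26 - 20 = 6, and 60 MHz is not larger
 * than 1 MHz << 6 = 64 MHz, so post stays 6 and becomes 6 - 4 = 2 after
 * the offset. pre = DIV_ROUND_UP(60 MHz, 4 MHz) - 1 = 14, giving a
 * resulting SCLK of (60 MHz / 15) >> 2 = exactly 1 MHz, returned as the
 * pre/post fields of the CTRL register.
 */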
static void ark_ecspi_intctrl(struct spi_ark_data *spi_ark, int enable)
{
	unsigned int val = 0;

	if (enable & ARK_INT_TE)
		val |= ARK_ECSPI_INT_TEEN;
	if (enable & ARK_INT_RR)
		val |= ARK_ECSPI_INT_RREN;

	writel(val, spi_ark->base + ARK_ECSPI_INT);
}

static void ark_ecspi_trigger(struct spi_ark_data *spi_ark)
{
	u32 reg;

	reg = readl(spi_ark->base + ARK_ECSPI_CTRL);
	reg |= ARK_ECSPI_CTRL_XCH;
	writel(reg, spi_ark->base + ARK_ECSPI_CTRL);
}
static int ark_ecspi_config(struct spi_device *spi)
{
	struct spi_ark_data *spi_ark = spi_master_get_devdata(spi->master);
	u32 ctrl = ARK_ECSPI_CTRL_ENABLE;
	u32 clk = spi_ark->speed_hz, delay, reg;
	u32 cfg = readl(spi_ark->base + ARK_ECSPI_CONFIG);

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time. So set master mode for all channels as we do not
	 * support slave mode.
	 */
	ctrl |= ARK_ECSPI_CTRL_MODE_MASK;

	/* Enable SPI_RDY handling (falling edge/level triggered). */
	if (spi->mode & SPI_READY)
		ctrl |= ARK_ECSPI_CTRL_DRCTL(spi_ark->spi_drctl);

	/* set clock speed */
	ctrl |= ark_ecspi_clkdiv(spi_ark, spi_ark->speed_hz, &clk);
	spi_ark->spi_bus_clk = clk;

	/* set chip select to use */
	ctrl |= ARK_ECSPI_CTRL_CS(spi->chip_select);

	ctrl |= (spi_ark->bits_per_word - 1) << ARK_ECSPI_CTRL_BL_OFFSET;

	cfg |= ARK_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPHA)
		cfg |= ARK_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
	else
		cfg &= ~ARK_ECSPI_CONFIG_SCLKPHA(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= ARK_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= ARK_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~ARK_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~ARK_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}

	if (spi->mode & SPI_CS_HIGH)
		cfg |= ARK_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~ARK_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	if (spi_ark->usedma)
		ctrl |= ARK_ECSPI_CTRL_SMC;

	/* The CTRL register always goes first to bring the controller out of reset. */
	writel(ctrl, spi_ark->base + ARK_ECSPI_CTRL);

	reg = readl(spi_ark->base + ARK_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= ARK_ECSPI_TESTREG_LBC;
	else
		reg &= ~ARK_ECSPI_TESTREG_LBC;
	writel(reg, spi_ark->base + ARK_ECSPI_TESTREG);

	writel(cfg, spi_ark->base + ARK_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the CONFIG register propagate into the
	 * hardware. It takes exactly one tick of the SCLK clock, but we wait
	 * for two SCLK ticks just to be sure. The effect of the delay it
	 * takes for the hardware to apply the changes is noticeable if SCLK
	 * runs very slowly. In such a case, if the polarity of SCLK should be
	 * inverted, the GPIO chip select might be asserted before the SCLK
	 * polarity changes, which would disrupt the SPI communication as the
	 * device on the other end would consider the change of SCLK polarity
	 * as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	/* enable rx fifo */
	writel(ARK_ECSPI_STAT_REN, spi_ark->base + ARK_ECSPI_STAT);

	/*
	 * Configure the DMA register: set up the watermarks and enable the
	 * DMA requests.
	 */
	if (spi_ark->usedma)
		writel(ARK_ECSPI_DMA_RX_WML(spi_ark->wml) |
		       ARK_ECSPI_DMA_TX_WML(spi_ark->wml) |
		       ARK_ECSPI_DMA_RXT_WML(spi_ark->wml) |
		       ARK_ECSPI_DMA_TEDEN | ARK_ECSPI_DMA_RXDEN |
		       ARK_ECSPI_DMA_RXTDEN, spi_ark->base + ARK_ECSPI_DMA);
	else
		writel(0, spi_ark->base + ARK_ECSPI_DMA);

	return 0;
}
static int ark_ecspi_rx_available(struct spi_ark_data *spi_ark)
{
	return readl(spi_ark->base + ARK_ECSPI_STAT) & ARK_ECSPI_STAT_RR;
}

static void ark_ecspi_reset(struct spi_ark_data *spi_ark)
{
	/* drain receive buffer */
	while (ark_ecspi_rx_available(spi_ark))
		readl(spi_ark->base + ARK_ECSPI_RXDATA);
}

static struct spi_ark_devtype_data ark_ecspi_devtype_data = {
	.intctrl = ark_ecspi_intctrl,
	.config = ark_ecspi_config,
	.trigger = ark_ecspi_trigger,
	.rx_available = ark_ecspi_rx_available,
	.reset = ark_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
};

static const struct of_device_id spi_ark_dt_ids[] = {
	{ .compatible = "arkmicro,ark-ecspi", .data = &ark_ecspi_devtype_data, },
	{ .compatible = "arkmicro,arke-ecspi", .data = &ark_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_ark_dt_ids);
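
/*
 * Illustrative device tree node for this driver. The addresses, interrupt,
 * clock/DMA phandles and GPIO number below are made up; only the property
 * names come from what the code in this file actually parses. Note that
 * "chipselects" is read with of_property_read_u32_index(), i.e. it is a
 * list of raw GPIO numbers rather than GPIO phandles:
 *
 *	spi0: spi@e4009000 {
 *		compatible = "arkmicro,ark-ecspi";
 *		reg = <0xe4009000 0x1000>;
 *		interrupts = <45>;
 *		clocks = <&clk_per>, <&clk_ipg>;
 *		clock-names = "per", "ipg";
 *		dmas = <&dma0 1>, <&dma0 2>;
 *		dma-names = "tx", "rx";
 *		num-chipselect = <1>;
 *		chipselects = <10>;
 *		ark,spi-rdy-drctl = <0>;
 *	};
 */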
static void spi_ark_chipselect(struct spi_device *spi, int is_active)
{
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (spi->mode & SPI_NO_CS)
		return;

	if (!gpio_is_valid(spi->cs_gpio))
		return;

	gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
}

static void spi_ark_push(struct spi_ark_data *spi_ark)
{
	while (spi_ark->txfifo < spi_ark->devtype_data->fifo_size) {
		if (!spi_ark->count)
			break;
		if (spi_ark->txfifo && (spi_ark->count == spi_ark->remainder))
			break;
		spi_ark->tx(spi_ark);
		spi_ark->txfifo++;
	}

	spi_ark->devtype_data->trigger(spi_ark);
}
static irqreturn_t spi_ark_isr(int irq, void *dev_id)
{
	struct spi_ark_data *spi_ark = dev_id;

	while (spi_ark->devtype_data->rx_available(spi_ark)) {
		spi_ark->rx(spi_ark);
		spi_ark->txfifo--;
	}

	if (spi_ark->count) {
		spi_ark_push(spi_ark);
		return IRQ_HANDLED;
	}

	if (spi_ark->txfifo) {
		/*
		 * No data left to push, but still waiting for RX data,
		 * enable the receive data available interrupt.
		 */
		spi_ark->devtype_data->intctrl(spi_ark, ARK_INT_RR);
		return IRQ_HANDLED;
	}

	spi_ark->devtype_data->intctrl(spi_ark, 0);
	complete(&spi_ark->xfer_done);

	return IRQ_HANDLED;
}
static int spi_ark_dma_configure(struct spi_master *master)
{
	int ret;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_ark_data *spi_ark = spi_master_get_devdata(master);

	spi_ark->bits_per_word = 32;

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_ark->base_phys + ARK_ECSPI_TXDATA;
	tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	tx.dst_maxburst = spi_ark->wml;
	tx.device_fc = false;
	ret = dmaengine_slave_config(master->dma_tx, &tx);
	if (ret) {
		dev_err(spi_ark->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_ark->base_phys + ARK_ECSPI_RXDATA;
	rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rx.src_maxburst = spi_ark->wml;
	rx.device_fc = false;
	ret = dmaengine_slave_config(master->dma_rx, &rx);
	if (ret) {
		dev_err(spi_ark->dev, "RX dma configuration failed with %d\n", ret);
		return ret;
	}

	return 0;
}
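
/*
 * Note on the configuration above (illustrative numbers): both channels use
 * 32-bit accesses to the TX/RX FIFO registers, and dst_maxburst/src_maxburst
 * are given in words of that width. With wml = 16, as selected by
 * spi_ark_can_dma() for lengths divisible by 64, each DMA burst moves
 * 64 bytes, matching the 16-word watermark that ark_ecspi_config() programs
 * into ARK_ECSPI_DMA.
 */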
static int spi_ark_setupxfer(struct spi_device *spi,
			     struct spi_transfer *t)
{
	struct spi_ark_data *spi_ark = spi_master_get_devdata(spi->master);
	u32 mask;
	int ret;

	if (!t)
		return 0;

	spi_ark->bits_per_word = t->bits_per_word;
	spi_ark->speed_hz = t->speed_hz;

	/* Initialize the functions for transfer */
	spi_ark->remainder = 0;
	spi_ark->read_u32 = 1;

	mask = (1 << spi_ark->bits_per_word) - 1;
	spi_ark->rx = spi_ark_buf_rx_swap;
	spi_ark->tx = spi_ark_buf_tx_swap;
	spi_ark->remainder = t->len;

	if (spi_ark->bits_per_word <= 8)
		spi_ark->word_mask = mask << 24 | mask << 16 |
				     mask << 8 | mask;
	else if (spi_ark->bits_per_word <= 16)
		spi_ark->word_mask = mask << 16 | mask;
	else
		spi_ark->word_mask = mask;

	spi_ark->usedma = spi_ark_can_dma(spi_ark->bitbang.master, spi, t);

	if (spi_ark->usedma) {
		ret = spi_ark_dma_configure(spi->master);
		if (ret)
			return ret;
	}

	spi_ark->devtype_data->config(spi);

	return 0;
}
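
/*
 * Illustrative examples for the setup above (values made up): with
 * bits_per_word = 8, mask = 0xff and word_mask = 0xffffffff, so four bytes
 * are packed into every 32-bit FIFO word while read_u32 is set; with
 * bits_per_word = 12, word_mask = 0x0fff0fff and two words share each FIFO
 * access. A 20-byte transfer stays on the PIO path because its 4-byte
 * aligned portion is below USE_DMA_THRESHOLD, whereas a 100-byte transfer
 * (already a multiple of 4) is handed to DMA in full when the DMA channels
 * are available.
 */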
static void spi_ark_sdma_exit(struct spi_ark_data *spi_ark)
{
	struct spi_master *master = spi_ark->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}
}

static int spi_ark_sdma_init(struct device *dev, struct spi_ark_data *spi_ark,
			     struct spi_master *master)
{
	int ret;

	spi_ark->wml = spi_ark->devtype_data->fifo_size / 2;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		master->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		ret = PTR_ERR(master->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		master->dma_rx = NULL;
		goto err;
	}

	init_completion(&spi_ark->dma_rx_completion);
	init_completion(&spi_ark->dma_tx_completion);
	master->can_dma = spi_ark_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_ark->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;

	return 0;
err:
	spi_ark_sdma_exit(spi_ark);
	return ret;
}
static void spi_ark_dma_rx_callback(void *cookie)
{
	struct spi_ark_data *spi_ark = (struct spi_ark_data *)cookie;

	/* Invalidate cache after read */
	dma_sync_sg_for_cpu(spi_ark->dev,
			    spi_ark->dma_rx_sg->sgl,
			    spi_ark->dma_rx_sg->nents,
			    DMA_FROM_DEVICE);

	complete(&spi_ark->dma_rx_completion);
}

static void spi_ark_dma_tx_callback(void *cookie)
{
	struct spi_ark_data *spi_ark = (struct spi_ark_data *)cookie;

	complete(&spi_ark->dma_tx_completion);
}

static int spi_ark_calculate_timeout(struct spi_ark_data *spi_ark, int size)
{
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / spi_ark->spi_bus_clk;

	/* Add extra second for scheduler related activities */
	timeout += 1;

	/* Double calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}
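
/*
 * Worked example for the timeout calculation above (illustrative values):
 * for a 4096-byte transfer at spi_bus_clk = 1 MHz, the wire-time term
 * (8 + 4) * 4096 / 1000000 truncates to 0 seconds (the real transfer takes
 * roughly 49 ms), the one-second scheduling margin dominates, and doubling
 * gives msecs_to_jiffies(2000), i.e. a two-second completion timeout.
 */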
static int spi_ark_dma_transfer(struct spi_ark_data *spi_ark,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_master *master = spi_ark->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	spi_ark->dma_rx_sg = rx;
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					  rx->sgl, rx->nents, DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = spi_ark_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_ark;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_ark->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					  tx->sgl, tx->nents, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(master->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = spi_ark_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_ark;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_ark->dma_tx_completion);

	/* Flush cache before write */
	dma_sync_sg_for_device(spi_ark->dev, tx->sgl,
			       tx->nents, DMA_TO_DEVICE);

	dma_async_issue_pending(master->dma_tx);

	transfer_timeout = spi_ark_calculate_timeout(spi_ark, transfer->len);

	/* Wait for the SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_ark->dma_tx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(spi_ark->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_ark->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		spi_ark->devtype_data->reset(spi_ark);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}
static int spi_ark_pio_xfer(struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_ark_data *spi_ark = spi_master_get_devdata(spi->master);
	unsigned long transfer_timeout;
	unsigned long timeout;

	spi_ark->tx_buf = transfer->tx_buf;
	spi_ark->rx_buf = transfer->rx_buf;
	spi_ark->count = transfer->len;
	spi_ark->txfifo = 0;

	reinit_completion(&spi_ark->xfer_done);

	spi_ark_push(spi_ark);

	spi_ark->devtype_data->intctrl(spi_ark, ARK_INT_TE);

	transfer_timeout = spi_ark_calculate_timeout(spi_ark, transfer->len);

	timeout = wait_for_completion_timeout(&spi_ark->xfer_done,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&spi->dev, "I/O Error in PIO\n");
		spi_ark->devtype_data->reset(spi_ark);
		return -ETIMEDOUT;
	}

	return transfer->len;
}

static int spi_ark_transfer(struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_ark_data *spi_ark = spi_master_get_devdata(spi->master);
	int ret;

	if (spi_ark->usedma) {
		ret = spi_ark_dma_transfer(spi_ark, &spi_ark->dma_transfer);
		if (ret < 0)
			return ret;

		if (spi_ark->pio_transfer.len > 0) {
			ret = spi_ark_pio_xfer(spi, &spi_ark->pio_transfer);
			if (ret < 0)
				return ret;
		}

		return transfer->len;
	}

	return spi_ark_pio_xfer(spi, transfer);
}
static int spi_ark_setup(struct spi_device *spi)
{
	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (spi->mode & SPI_NO_CS)
		return 0;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio,
				      spi->mode & SPI_CS_HIGH ? 0 : 1);

	spi_ark_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}

static void spi_ark_cleanup(struct spi_device *spi)
{
}

static int
spi_ark_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_ark_data *spi_ark = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_ark->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_ark->clk_ipg);
	if (ret) {
		clk_disable(spi_ark->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_ark_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_ark_data *spi_ark = spi_master_get_devdata(master);

	clk_disable(spi_ark->clk_ipg);
	clk_disable(spi_ark->clk_per);

	return 0;
}
static int spi_ark_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_ark_dt_ids, &pdev->dev);
	struct spi_master *master;
	struct spi_ark_data *spi_ark;
	struct resource *res;
	int i, ret, irq, spi_drctl;
	u32 num_chipselect, cs_gpio;

	if (!np) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_ark_data));
	if (!master)
		return -ENOMEM;

	ret = of_property_read_u32(np, "ark,spi-rdy-drctl", &spi_drctl);
	if ((ret < 0) || (spi_drctl >= 0x3)) {
		/* '11' is reserved */
		spi_drctl = 0;
	}

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = np ? -1 : pdev->id;

	spi_ark = spi_master_get_devdata(master);
	spi_ark->bitbang.master = master;
	spi_ark->dev = &pdev->dev;
	spi_ark->devtype_data = of_id ? of_id->data :
		(struct spi_ark_devtype_data *)pdev->id_entry->driver_data;

	ret = of_property_read_u32(np, "num-chipselect", &num_chipselect);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't get num-chipselect: %d\n", ret);
		goto out_master_put;
	}

	master->num_chipselect = num_chipselect;
	master->cs_gpios = devm_kzalloc(&master->dev,
			sizeof(int) * master->num_chipselect, GFP_KERNEL);
	if (!master->cs_gpios) {
		ret = -ENOMEM;
		goto out_master_put;
	}

	if (of_device_is_compatible(pdev->dev.of_node, "arkmicro,arke-ecspi"))
		spi_ark->is_arke = true;
	else
		spi_ark->is_arke = false;

	for (i = 0; i < master->num_chipselect; i++) {
		of_property_read_u32_index(np, "chipselects", i, &cs_gpio);
		master->cs_gpios[i] = cs_gpio;
	}

	spi_ark->bitbang.chipselect = spi_ark_chipselect;
	spi_ark->bitbang.setup_transfer = spi_ark_setupxfer;
	spi_ark->bitbang.txrx_bufs = spi_ark_transfer;
	spi_ark->bitbang.master->setup = spi_ark_setup;
	spi_ark->bitbang.master->cleanup = spi_ark_cleanup;
	spi_ark->bitbang.master->prepare_message = spi_ark_prepare_message;
	spi_ark->bitbang.master->unprepare_message = spi_ark_unprepare_message;
	spi_ark->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
					     SPI_NO_CS;
	spi_ark->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
	spi_ark->spi_drctl = spi_drctl;

	init_completion(&spi_ark->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_ark->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_ark->base)) {
		ret = PTR_ERR(spi_ark->base);
		goto out_master_put;
	}
	spi_ark->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_ark_isr, 0,
			       dev_name(&pdev->dev), spi_ark);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_ark->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_ark->clk_ipg)) {
		ret = PTR_ERR(spi_ark->clk_ipg);
		goto out_master_put;
	}

	spi_ark->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_ark->clk_per)) {
		ret = PTR_ERR(spi_ark->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_ark->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_ark->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_ark->spi_clk = clk_get_rate(spi_ark->clk_per);

	if (spi_ark->devtype_data->has_dmamode) {
		ret = spi_ark_sdma_init(&pdev->dev, spi_ark, master);
		if (ret == -EPROBE_DEFER)
			goto out_clk_put;
		if (ret < 0)
			dev_err(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_ark->devtype_data->reset(spi_ark);
	spi_ark->devtype_data->intctrl(spi_ark, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_ark->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	if (!master->cs_gpios) {
		dev_err(&pdev->dev, "No CS GPIOs available\n");
		ret = -EINVAL;
		goto out_clk_put;
	}

	for (i = 0; i < master->num_chipselect; i++) {
		if (!gpio_is_valid(master->cs_gpios[i]))
			continue;

		ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "Can't get CS GPIO %i err = %d.\n",
				master->cs_gpios[i], ret);
			goto out_clk_put;
		}
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_ark->clk_ipg);
	clk_disable(spi_ark->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_ark->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_ark->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}
static int spi_ark_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_ark_data *spi_ark = spi_master_get_devdata(master);
	int ret;

	spi_bitbang_stop(&spi_ark->bitbang);

	ret = clk_enable(spi_ark->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_ark->clk_ipg);
	if (ret) {
		clk_disable(spi_ark->clk_per);
		return ret;
	}

	writel(0, spi_ark->base + ARK_ECSPI_CTRL);
	clk_disable_unprepare(spi_ark->clk_ipg);
	clk_disable_unprepare(spi_ark->clk_per);

	spi_ark_sdma_exit(spi_ark);
	spi_master_put(master);

	return 0;
}

static struct platform_driver spi_ark_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = spi_ark_dt_ids,
	},
	.probe = spi_ark_probe,
	.remove = spi_ark_remove,
};
module_platform_driver(spi_ark_driver);

MODULE_DESCRIPTION("Arkmicro SPI master controller driver");
MODULE_AUTHOR("Sim");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);