spi-s3c64xx.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480
  1. // SPDX-License-Identifier: GPL-2.0+
  2. //
  3. // Copyright (c) 2009 Samsung Electronics Co., Ltd.
  4. // Jaswinder Singh <jassi.brar@samsung.com>
  5. #include <linux/init.h>
  6. #include <linux/module.h>
  7. #include <linux/interrupt.h>
  8. #include <linux/delay.h>
  9. #include <linux/clk.h>
  10. #include <linux/dma-mapping.h>
  11. #include <linux/dmaengine.h>
  12. #include <linux/platform_device.h>
  13. #include <linux/pm_runtime.h>
  14. #include <linux/spi/spi.h>
  15. #include <linux/gpio.h>
  16. #include <linux/of.h>
  17. #include <linux/of_gpio.h>
  18. #include <linux/platform_data/spi-s3c64xx.h>
  19. #define MAX_SPI_PORTS 6
  20. #define S3C64XX_SPI_QUIRK_POLL (1 << 0)
  21. #define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
  22. #define AUTOSUSPEND_TIMEOUT 2000
  23. /* Registers and bit-fields */
  24. #define S3C64XX_SPI_CH_CFG 0x00
  25. #define S3C64XX_SPI_CLK_CFG 0x04
  26. #define S3C64XX_SPI_MODE_CFG 0x08
  27. #define S3C64XX_SPI_SLAVE_SEL 0x0C
  28. #define S3C64XX_SPI_INT_EN 0x10
  29. #define S3C64XX_SPI_STATUS 0x14
  30. #define S3C64XX_SPI_TX_DATA 0x18
  31. #define S3C64XX_SPI_RX_DATA 0x1C
  32. #define S3C64XX_SPI_PACKET_CNT 0x20
  33. #define S3C64XX_SPI_PENDING_CLR 0x24
  34. #define S3C64XX_SPI_SWAP_CFG 0x28
  35. #define S3C64XX_SPI_FB_CLK 0x2C
  36. #define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
  37. #define S3C64XX_SPI_CH_SW_RST (1<<5)
  38. #define S3C64XX_SPI_CH_SLAVE (1<<4)
  39. #define S3C64XX_SPI_CPOL_L (1<<3)
  40. #define S3C64XX_SPI_CPHA_B (1<<2)
  41. #define S3C64XX_SPI_CH_RXCH_ON (1<<1)
  42. #define S3C64XX_SPI_CH_TXCH_ON (1<<0)
  43. #define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
  44. #define S3C64XX_SPI_CLKSEL_SRCSHFT 9
  45. #define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
  46. #define S3C64XX_SPI_PSR_MASK 0xff
  47. #define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
  48. #define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
  49. #define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
  50. #define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
  51. #define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
  52. #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
  53. #define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
  54. #define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
  55. #define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
  56. #define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
  57. #define S3C64XX_SPI_MODE_4BURST (1<<0)
  58. #define S3C64XX_SPI_SLAVE_AUTO (1<<1)
  59. #define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
  60. #define S3C64XX_SPI_SLAVE_NSC_CNT_2 (2<<4)
  61. #define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
  62. #define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
  63. #define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
  64. #define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
  65. #define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
  66. #define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
  67. #define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
  68. #define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
  69. #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
  70. #define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
  71. #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
  72. #define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
  73. #define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
  74. #define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
  75. #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
  76. #define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
  77. #define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
  78. #define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
  79. #define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
  80. #define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
  81. #define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
  82. #define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
  83. #define S3C64XX_SPI_SWAP_RX_EN (1<<4)
  84. #define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
  85. #define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
  86. #define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
  87. #define S3C64XX_SPI_SWAP_TX_EN (1<<0)
  88. #define S3C64XX_SPI_FBCLK_MSK (3<<0)
  89. #define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
  90. #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
  91. (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
  92. #define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
  93. #define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
  94. FIFO_LVL_MASK(i))
  95. #define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
  96. #define S3C64XX_SPI_TRAILCNT_OFF 19
  97. #define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
  98. #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
  99. #define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
  100. #define RXBUSY (1<<2)
  101. #define TXBUSY (1<<3)
/* Per-direction DMA channel state; one instance each for Rx and Tx. */
struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;		/* dmaengine channel handle */
	dma_cookie_t cookie;		/* cookie of the last submitted descriptor */
	enum dma_transfer_direction direction;	/* DMA_DEV_TO_MEM for Rx, else Tx */
};
/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Bitmask of S3C64XX_SPI_QUIRK_* flags for this IP revision.
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 * @clk_ioclk: True if an extra I/O-bus clock must be handled for this
 *	controller variant (presumably acquired at probe - confirm there).
 *
 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
 * differ in some aspects such as the size of the fifo and spi bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
	bool	clk_ioclk;
};
/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @regs: Pointer to ioremap'ed controller registers.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @ioclk: Pointer to the i/o clock between master and slave
 * @pdev: Platform device this driver is bound to.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @lock: Controller specific lock.
 * @sfr_start: BUS address of SPI controller regs.
 * @xfer_completion: To indicate completion of xfer task.
 * @state: Set of FLAGS (RXBUSY/TXBUSY) to indicate status.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 * @rx_dma: DMA channel state for Rx.
 * @tx_dma: DMA channel state for Tx.
 * @port_conf: Hardware description (FIFO masks, quirks) for this IP instance.
 * @port_id: Index of this port into @port_conf->fifo_lvl_mask.
 */
struct s3c64xx_spi_driver_data {
	void __iomem            *regs;
	struct clk              *clk;
	struct clk              *src_clk;
	struct clk              *ioclk;
	struct platform_device  *pdev;
	struct spi_master       *master;
	struct s3c64xx_spi_info *cntrlr_info;
	spinlock_t              lock;
	unsigned long           sfr_start;
	struct completion       xfer_completion;
	unsigned                state;
	unsigned                cur_mode, cur_bpw;
	unsigned                cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
	struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;
};
  168. static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
  169. {
  170. void __iomem *regs = sdd->regs;
  171. unsigned long loops;
  172. u32 val;
  173. writel(0, regs + S3C64XX_SPI_PACKET_CNT);
  174. val = readl(regs + S3C64XX_SPI_CH_CFG);
  175. val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
  176. writel(val, regs + S3C64XX_SPI_CH_CFG);
  177. val = readl(regs + S3C64XX_SPI_CH_CFG);
  178. val |= S3C64XX_SPI_CH_SW_RST;
  179. val &= ~S3C64XX_SPI_CH_HS_EN;
  180. writel(val, regs + S3C64XX_SPI_CH_CFG);
  181. /* Flush TxFIFO*/
  182. loops = msecs_to_loops(1);
  183. do {
  184. val = readl(regs + S3C64XX_SPI_STATUS);
  185. } while (TX_FIFO_LVL(val, sdd) && loops--);
  186. if (loops == 0)
  187. dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
  188. /* Flush RxFIFO*/
  189. loops = msecs_to_loops(1);
  190. do {
  191. val = readl(regs + S3C64XX_SPI_STATUS);
  192. if (RX_FIFO_LVL(val, sdd))
  193. readl(regs + S3C64XX_SPI_RX_DATA);
  194. else
  195. break;
  196. } while (loops--);
  197. if (loops == 0)
  198. dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
  199. val = readl(regs + S3C64XX_SPI_CH_CFG);
  200. val &= ~S3C64XX_SPI_CH_SW_RST;
  201. writel(val, regs + S3C64XX_SPI_CH_CFG);
  202. val = readl(regs + S3C64XX_SPI_MODE_CFG);
  203. val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
  204. writel(val, regs + S3C64XX_SPI_MODE_CFG);
  205. }
  206. static void s3c64xx_spi_dmacb(void *data)
  207. {
  208. struct s3c64xx_spi_driver_data *sdd;
  209. struct s3c64xx_spi_dma_data *dma = data;
  210. unsigned long flags;
  211. if (dma->direction == DMA_DEV_TO_MEM)
  212. sdd = container_of(data,
  213. struct s3c64xx_spi_driver_data, rx_dma);
  214. else
  215. sdd = container_of(data,
  216. struct s3c64xx_spi_driver_data, tx_dma);
  217. spin_lock_irqsave(&sdd->lock, flags);
  218. if (dma->direction == DMA_DEV_TO_MEM) {
  219. sdd->state &= ~RXBUSY;
  220. if (!(sdd->state & TXBUSY))
  221. complete(&sdd->xfer_completion);
  222. } else {
  223. sdd->state &= ~TXBUSY;
  224. if (!(sdd->state & RXBUSY))
  225. complete(&sdd->xfer_completion);
  226. }
  227. spin_unlock_irqrestore(&sdd->lock, flags);
  228. }
  229. static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
  230. struct sg_table *sgt)
  231. {
  232. struct s3c64xx_spi_driver_data *sdd;
  233. struct dma_slave_config config;
  234. struct dma_async_tx_descriptor *desc;
  235. int ret;
  236. memset(&config, 0, sizeof(config));
  237. if (dma->direction == DMA_DEV_TO_MEM) {
  238. sdd = container_of((void *)dma,
  239. struct s3c64xx_spi_driver_data, rx_dma);
  240. config.direction = dma->direction;
  241. config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
  242. config.src_addr_width = sdd->cur_bpw / 8;
  243. config.src_maxburst = 1;
  244. dmaengine_slave_config(dma->ch, &config);
  245. } else {
  246. sdd = container_of((void *)dma,
  247. struct s3c64xx_spi_driver_data, tx_dma);
  248. config.direction = dma->direction;
  249. config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
  250. config.dst_addr_width = sdd->cur_bpw / 8;
  251. config.dst_maxburst = 1;
  252. dmaengine_slave_config(dma->ch, &config);
  253. }
  254. desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
  255. dma->direction, DMA_PREP_INTERRUPT);
  256. if (!desc) {
  257. dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
  258. dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
  259. return -ENOMEM;
  260. }
  261. desc->callback = s3c64xx_spi_dmacb;
  262. desc->callback_param = dma;
  263. dma->cookie = dmaengine_submit(desc);
  264. ret = dma_submit_error(dma->cookie);
  265. if (ret) {
  266. dev_err(&sdd->pdev->dev, "DMA submission failed");
  267. return -EIO;
  268. }
  269. dma_async_issue_pending(dma->ch);
  270. return 0;
  271. }
  272. static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
  273. {
  274. struct s3c64xx_spi_driver_data *sdd =
  275. spi_master_get_devdata(spi->master);
  276. if (sdd->cntrlr_info->no_cs)
  277. return;
  278. if (enable) {
  279. if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
  280. writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
  281. } else {
  282. u32 ssel = readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL);
  283. ssel |= (S3C64XX_SPI_SLAVE_AUTO |
  284. S3C64XX_SPI_SLAVE_NSC_CNT_2);
  285. writel(ssel, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
  286. }
  287. } else {
  288. if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
  289. writel(S3C64XX_SPI_SLAVE_SIG_INACT,
  290. sdd->regs + S3C64XX_SPI_SLAVE_SEL);
  291. }
  292. }
  293. static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
  294. {
  295. struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
  296. if (is_polling(sdd))
  297. return 0;
  298. spi->dma_rx = sdd->rx_dma.ch;
  299. spi->dma_tx = sdd->tx_dma.ch;
  300. return 0;
  301. }
  302. static bool s3c64xx_spi_can_dma(struct spi_master *master,
  303. struct spi_device *spi,
  304. struct spi_transfer *xfer)
  305. {
  306. struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
  307. return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
  308. }
/*
 * s3c64xx_enable_datapath - program channel/mode registers for one transfer
 * @sdd: driver state
 * @xfer: transfer to start
 * @dma_mode: non-zero for DMA, zero for PIO (FIFO) mode
 *
 * Builds MODE_CFG/CH_CFG values, sets the packet counter, and for PIO Tx
 * stuffs the TX FIFO directly.  The register writes are deferred to the
 * end so the channel is enabled in one step.  Returns 0 or a negative
 * error propagated from prepare_dma().
 */
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				   struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;
	int ret = 0;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		/* packet count = number of words (len scaled by bpw) */
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
			| S3C64XX_SPI_PACKET_CNT_EN,
			regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
		} else {
			/* PIO: copy the buffer into the TX FIFO with the
			 * access width matching the current bits-per-word. */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed receive path only without CPHA.
		 * NOTE(review): the 30 MHz threshold is taken as-is from
		 * the original code - confirm against the SoC datasheet. */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
				| S3C64XX_SPI_PACKET_CNT_EN,
				regs + S3C64XX_SPI_PACKET_CNT);
			ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
		}
	}

	if (ret)
		return ret;

	/* Commit mode first, then enable the channel(s). */
	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);

	return 0;
}
  374. static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
  375. int timeout_ms)
  376. {
  377. void __iomem *regs = sdd->regs;
  378. unsigned long val = 1;
  379. u32 status;
  380. /* max fifo depth available */
  381. u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
  382. if (timeout_ms)
  383. val = msecs_to_loops(timeout_ms);
  384. do {
  385. status = readl(regs + S3C64XX_SPI_STATUS);
  386. } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
  387. /* return the actual received data length */
  388. return RX_FIFO_LVL(status, sdd);
  389. }
/*
 * s3c64xx_wait_for_dma - wait for a DMA-driven transfer to complete
 * @sdd: driver state
 * @xfer: the in-flight transfer
 *
 * Sleeps on the completion signalled by s3c64xx_spi_dmacb(), then for
 * Tx-only transfers additionally busy-waits until the TX FIFO has really
 * drained onto the bus.  Returns 0 on success, -EIO on timeout.
 */
static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	/* NOTE(review): 'xfer->len * 8 * 1000' is int arithmetic and could
	 * overflow for very large transfers - confirm the upper bound. */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within timeout, then
	 * proceed further else return -EIO.
	 * DmaTx returns after simply writing data in the FIFO,
	 * w/o waiting for real transmission on the bus to finish.
	 * DmaRx returns only after Dma read data from FIFO which
	 * needs bus transmission to finish, so we don't worry if
	 * Xfer involved Rx(with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}
/*
 * s3c64xx_wait_for_pio - complete a PIO transfer by polling the FIFO
 * @sdd: driver state
 * @xfer: the in-flight transfer
 *
 * Busy-waits until enough data has arrived in the RX FIFO (the datapath
 * always shifts in, even for Tx-only transfers), then for Rx transfers
 * drains the FIFO into xfer->rx_buf in FIFO-sized chunks.  Returns 0 on
 * success, -EIO on timeout.
 */
static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	/* Wait until the expected amount of data shows up in the RX FIFO.
	 * (transfer_one caps xfer->len to the FIFO size in polling mode,
	 * so this level is reachable.) */
	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	if (!val)
		return -EIO;

	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed atleast once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		/* Drain whatever arrived, using the access width that
		 * matches the current bits-per-word. */
		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}
/*
 * s3c64xx_spi_config - apply cur_mode/cur_bpw/cur_speed to the hardware
 * @sdd: driver state (caller has already updated the cur_* fields)
 *
 * Disables the output clock (on variants with an internal prescaler),
 * programs polarity/phase and the channel/bus transfer width, then sets
 * the clock rate either via the CMU (clk_set_rate) or via the internal
 * prescaler.  Returns 0 or the clk_set_rate() error.
 */
static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	int ret;
	u32 val;

	/* Disable Clock */
	if (!sdd->port_conf->clk_from_cmu) {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
		S3C64XX_SPI_CPOL_L |
		S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* The src_clk clock is divided internally by 2 */
		ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		if (ret)
			return ret;
	} else {
		/* Configure Clock */
		/* prescaler: out = src / 2 / (PSR + 1) */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	return 0;
}
  542. #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
  543. static int s3c64xx_spi_prepare_message(struct spi_master *master,
  544. struct spi_message *msg)
  545. {
  546. struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
  547. struct spi_device *spi = msg->spi;
  548. struct s3c64xx_spi_csinfo *cs = spi->controller_data;
  549. /* Configure feedback delay */
  550. writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
  551. return 0;
  552. }
/*
 * s3c64xx_spi_transfer_one - execute a single spi_transfer
 * @master: the controller
 * @spi: the target device
 * @xfer: the transfer to run
 *
 * Reconfigures the controller when bits-per-word or speed changed, picks
 * DMA vs. PIO, and runs the transfer.  On polling-only controllers a
 * transfer larger than the FIFO is temporarily split into FIFO-sized
 * chunks by rewriting xfer->len/buf pointers (restored before return).
 * Returns 0 or a negative error.
 */
static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
	const void *tx_buf = NULL;	/* saved originals for the */
	void *rx_buf = NULL;		/* polling chunk-split path */
	int target_len = 0, origin_len = 0;
	int use_dma = 0;
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		sdd->cur_mode = spi->mode;
		status = s3c64xx_spi_config(sdd);
		if (status)
			return status;
	}

	if (!is_polling(sdd) && (xfer->len > fifo_len) &&
	    sdd->rx_dma.ch && sdd->tx_dma.ch) {
		use_dma = 1;
	} else if (is_polling(sdd) && xfer->len > fifo_len) {
		/* Polling cannot exceed the FIFO: remember the original
		 * buffers/length and walk the transfer in FIFO chunks. */
		tx_buf = xfer->tx_buf;
		rx_buf = xfer->rx_buf;
		origin_len = xfer->len;
		target_len = xfer->len;
		if (xfer->len > fifo_len)
			xfer->len = fifo_len;
	}

	do {
		spin_lock_irqsave(&sdd->lock, flags);

		/* Pending only which is to be done */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		/* Start the signals */
		s3c64xx_spi_set_cs(spi, true);

		status = s3c64xx_enable_datapath(sdd, xfer, use_dma);

		spin_unlock_irqrestore(&sdd->lock, flags);

		if (status) {
			dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
			break;
		}

		if (use_dma)
			status = s3c64xx_wait_for_dma(sdd, xfer);
		else
			status = s3c64xx_wait_for_pio(sdd, xfer);

		if (status) {
			/* 'f' = still flagged busy (failed), 'p' = passed */
			dev_err(&spi->dev,
				"I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len);

			if (use_dma) {
				/* cancel whichever direction never finished */
				if (xfer->tx_buf && (sdd->state & TXBUSY))
					dmaengine_terminate_all(sdd->tx_dma.ch);
				if (xfer->rx_buf && (sdd->state & RXBUSY))
					dmaengine_terminate_all(sdd->rx_dma.ch);
			}
		} else {
			s3c64xx_flush_fifo(sdd);
		}

		/* Advance to the next chunk of a split polling transfer. */
		if (target_len > 0) {
			target_len -= xfer->len;

			if (xfer->tx_buf)
				xfer->tx_buf += xfer->len;

			if (xfer->rx_buf)
				xfer->rx_buf += xfer->len;

			if (target_len > fifo_len)
				xfer->len = fifo_len;
			else
				xfer->len = target_len;
		}
	} while (target_len > 0);

	if (origin_len) {
		/* Restore original xfer buffers and length */
		xfer->tx_buf = tx_buf;
		xfer->rx_buf = rx_buf;
		xfer->len = origin_len;
	}

	return status;
}
  643. static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
  644. struct spi_device *spi)
  645. {
  646. struct s3c64xx_spi_csinfo *cs;
  647. struct device_node *slave_np, *data_np = NULL;
  648. u32 fb_delay = 0;
  649. slave_np = spi->dev.of_node;
  650. if (!slave_np) {
  651. dev_err(&spi->dev, "device node not found\n");
  652. return ERR_PTR(-EINVAL);
  653. }
  654. data_np = of_get_child_by_name(slave_np, "controller-data");
  655. if (!data_np) {
  656. dev_err(&spi->dev, "child node 'controller-data' not found\n");
  657. return ERR_PTR(-EINVAL);
  658. }
  659. cs = kzalloc(sizeof(*cs), GFP_KERNEL);
  660. if (!cs) {
  661. of_node_put(data_np);
  662. return ERR_PTR(-ENOMEM);
  663. }
  664. of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
  665. cs->fb_delay = fb_delay;
  666. of_node_put(data_np);
  667. return cs;
  668. }
/*
 * Here we only check the validity of requested configuration
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		/* DT path: cs comes from the "controller-data" child node
		 * (may be an ERR_PTR, caught by the check below). */
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	} else if (cs) {
		/* On non-DT platforms the SPI core will set spi->cs_gpio
		 * to -ENOENT. The GPIO pin used to drive the chip select
		 * is defined by using platform data so spi->cs_gpio value
		 * has to be override to have the proper GPIO pin number.
		 */
		spi->cs_gpio = cs->line;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	/* First setup() call for this device: claim the CS GPIO once. */
	if (!spi_get_ctldata(spi)) {
		if (gpio_is_valid(spi->cs_gpio)) {
			err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					spi->cs_gpio, err);
				goto err_gpio_req;
			}
		}

		spi_set_ctldata(spi, cs);
	}

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		/* Derive the prescaler for the requested rate; the output
		 * clock is src / 2 / (psr + 1). */
		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		/* all-ones psr is not programmable; step back one */
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			/* resulting rate too fast: slow down by one step
			 * if the prescaler still has room */
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
	s3c64xx_spi_set_cs(spi, false);

	return 0;

setup_exit:
	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	s3c64xx_spi_set_cs(spi, false);

	/* Undo this call's claims so a later setup() can retry cleanly. */
	if (gpio_is_valid(spi->cs_gpio))
		gpio_free(spi->cs_gpio);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	/* cs was allocated by s3c64xx_get_slave_ctrldata() only on DT */
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}
  757. static void s3c64xx_spi_cleanup(struct spi_device *spi)
  758. {
  759. struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
  760. if (gpio_is_valid(spi->cs_gpio)) {
  761. gpio_free(spi->cs_gpio);
  762. if (spi->dev.of_node)
  763. kfree(cs);
  764. else {
  765. /* On non-DT platforms, the SPI core sets
  766. * spi->cs_gpio to -ENOENT and .setup()
  767. * overrides it with the GPIO pin value
  768. * passed using platform data.
  769. */
  770. spi->cs_gpio = -ENOENT;
  771. }
  772. }
  773. spi_set_ctldata(spi, NULL);
  774. }
  775. static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
  776. {
  777. struct s3c64xx_spi_driver_data *sdd = data;
  778. struct spi_master *spi = sdd->master;
  779. unsigned int val, clr = 0;
  780. val = readl(sdd->regs + S3C64XX_SPI_STATUS);
  781. if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
  782. clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
  783. dev_err(&spi->dev, "RX overrun\n");
  784. }
  785. if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
  786. clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
  787. dev_err(&spi->dev, "RX underrun\n");
  788. }
  789. if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
  790. clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
  791. dev_err(&spi->dev, "TX overrun\n");
  792. }
  793. if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
  794. clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
  795. dev_err(&spi->dev, "TX underrun\n");
  796. }
  797. /* Clear the pending irq by setting and then clearing it */
  798. writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
  799. writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
  800. return IRQ_HANDLED;
  801. }
/*
 * Program the controller's default state: chip-select handling, interrupts
 * off, pending flags cleared, trailing-byte count set, FIFOs flushed.
 * Called from probe and from runtime resume.
 */
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	if (sci->no_cs)
		writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		/* Manual CS control: start with the slave de-selected */
		writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		/* Select the configured bus-clock parent by index */
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	/* Disable 4-burst mode and program the trailing-byte count */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	s3c64xx_flush_fifo(sdd);
}
  834. #ifdef CONFIG_OF
  835. static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
  836. {
  837. struct s3c64xx_spi_info *sci;
  838. u32 temp;
  839. sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
  840. if (!sci)
  841. return ERR_PTR(-ENOMEM);
  842. if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
  843. dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
  844. sci->src_clk_nr = 0;
  845. } else {
  846. sci->src_clk_nr = temp;
  847. }
  848. if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
  849. dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
  850. sci->num_cs = 1;
  851. } else {
  852. sci->num_cs = temp;
  853. }
  854. sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
  855. return sci;
  856. }
  857. #else
/* Without CONFIG_OF there is nothing to parse: hand back platform data. */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
  862. #endif
  863. static const struct of_device_id s3c64xx_spi_dt_match[];
/*
 * Return the SoC-specific port configuration for this controller,
 * taken from the OF match table on DT boots or from the
 * platform_device_id table otherwise.
 */
static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
				struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			 platform_get_device_id(pdev)->driver_data;
}
/*
 * Probe: map registers, acquire clocks, DMA channels and the error IRQ,
 * initialise the hardware and register the SPI master.  On success the
 * device is left runtime-PM enabled with autosuspend; on failure every
 * resource acquired so far is unwound via the goto ladder at the end.
 */
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
	struct resource	*mem_res;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	int ret, irq;
	char clk_name[16];

	if (!sci && pdev->dev.of_node) {
		sci = s3c64xx_spi_parse_dt(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);
	}

	if (!sci) {
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
	if (pdev->dev.of_node) {
		/* DT boot: bus number comes from the "spiN" alias */
		ret = of_alias_get_id(pdev->dev.of_node, "spi");
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
				ret);
			goto err_deref_master;
		}
		sdd->port_id = ret;
	} else {
		sdd->port_id = pdev->id;
	}

	sdd->cur_bpw = 8;

	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
	sdd->rx_dma.direction = DMA_DEV_TO_MEM;

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = sdd->port_id;
	master->setup = s3c64xx_spi_setup;
	master->cleanup = s3c64xx_spi_cleanup;
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
	master->prepare_message = s3c64xx_spi_prepare_message;
	master->transfer_one = s3c64xx_spi_transfer_one;
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->auto_runtime_pm = true;
	if (!is_polling(sdd))
		master->can_dma = s3c64xx_spi_can_dma;

	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
		goto err_deref_master;
	}

	if (sci->cfg_gpio && sci->cfg_gpio()) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err_deref_master;
	}

	/* Setup clocks */
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err_deref_master;
	}

	ret = clk_prepare_enable(sdd->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		goto err_deref_master;
	}

	/* Bus clock parent is selected by index (samsung,spi-src-clk) */
	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err_disable_clk;
	}

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
		goto err_disable_clk;
	}

	if (sdd->port_conf->clk_ioclk) {
		/* Some SoCs (e.g. exynos5433) need an extra I/O clock */
		sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
		if (IS_ERR(sdd->ioclk)) {
			dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
			ret = PTR_ERR(sdd->ioclk);
			goto err_disable_src_clk;
		}

		ret = clk_prepare_enable(sdd->ioclk);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
			goto err_disable_src_clk;
		}
	}

	if (!is_polling(sdd)) {
		/* Acquire DMA channels */
		sdd->rx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
								  "rx");
		if (IS_ERR(sdd->rx_dma.ch)) {
			dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
			ret = PTR_ERR(sdd->rx_dma.ch);
			goto err_disable_io_clk;
		}
		sdd->tx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
								  "tx");
		if (IS_ERR(sdd->tx_dma.ch)) {
			dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
			ret = PTR_ERR(sdd->tx_dma.ch);
			goto err_release_rx_dma;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* Setup default mode */
	s3c64xx_spi_hwinit(sdd);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		goto err_pm_put;
	}

	/* Enable only the error (over/underrun) interrupts */
	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
		goto err_pm_put;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
					sdd->port_id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
					mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_pm_put:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (!is_polling(sdd))
		dma_release_channel(sdd->tx_dma.ch);
err_release_rx_dma:
	if (!is_polling(sdd))
		dma_release_channel(sdd->rx_dma.ch);
err_disable_io_clk:
	clk_disable_unprepare(sdd->ioclk);
err_disable_src_clk:
	clk_disable_unprepare(sdd->src_clk);
err_disable_clk:
	clk_disable_unprepare(sdd->clk);
err_deref_master:
	spi_master_put(master);

	return ret;
}
/*
 * Tear down the controller: quiesce interrupts, release DMA channels and
 * clocks, and wind down the runtime PM state set up in probe.
 */
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	/* Make sure the device is powered before touching registers */
	pm_runtime_get_sync(&pdev->dev);

	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	clk_disable_unprepare(sdd->ioclk);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	/* Drop the reference without triggering a runtime suspend,
	 * then mark the device suspended for the PM core. */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}
  1080. #ifdef CONFIG_PM_SLEEP
  1081. static int s3c64xx_spi_suspend(struct device *dev)
  1082. {
  1083. struct spi_master *master = dev_get_drvdata(dev);
  1084. struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
  1085. int ret = spi_master_suspend(master);
  1086. if (ret)
  1087. return ret;
  1088. ret = pm_runtime_force_suspend(dev);
  1089. if (ret < 0)
  1090. return ret;
  1091. sdd->cur_speed = 0; /* Output Clock is stopped */
  1092. return 0;
  1093. }
  1094. static int s3c64xx_spi_resume(struct device *dev)
  1095. {
  1096. struct spi_master *master = dev_get_drvdata(dev);
  1097. struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
  1098. struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
  1099. int ret;
  1100. if (sci->cfg_gpio)
  1101. sci->cfg_gpio();
  1102. ret = pm_runtime_force_resume(dev);
  1103. if (ret < 0)
  1104. return ret;
  1105. return spi_master_resume(master);
  1106. }
  1107. #endif /* CONFIG_PM_SLEEP */
  1108. #ifdef CONFIG_PM
  1109. static int s3c64xx_spi_runtime_suspend(struct device *dev)
  1110. {
  1111. struct spi_master *master = dev_get_drvdata(dev);
  1112. struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
  1113. clk_disable_unprepare(sdd->clk);
  1114. clk_disable_unprepare(sdd->src_clk);
  1115. clk_disable_unprepare(sdd->ioclk);
  1116. return 0;
  1117. }
/*
 * Runtime resume: re-enable the controller clocks (optional ioclk first,
 * then bus source clock, then gate clock) and re-program the controller's
 * default register state.  Unwinds already-enabled clocks on failure.
 */
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	if (sdd->port_conf->clk_ioclk) {
		ret = clk_prepare_enable(sdd->ioclk);
		if (ret != 0)
			return ret;
	}

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		goto err_disable_ioclk;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0)
		goto err_disable_src_clk;

	/* Restore the controller's default setup (see s3c64xx_spi_hwinit) */
	s3c64xx_spi_hwinit(sdd);

	return 0;

err_disable_src_clk:
	clk_disable_unprepare(sdd->src_clk);
err_disable_ioclk:
	clk_disable_unprepare(sdd->ioclk);

	return ret;
}
  1142. #endif /* CONFIG_PM */
/* System sleep reuses the runtime PM paths via pm_runtime_force_*(). */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
/*
 * Per-SoC controller parameters: FIFO level mask per port, positions of
 * the RX-level and TX-done status bits, and feature/quirk flags.
 */
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

/* Exynos parts take the SPI clock from the clock management unit */
static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};

/* exynos5433 additionally requires the separate spi_ioclk */
static struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.clk_ioclk	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
/* Non-DT (board file) device name -> port configuration mapping */
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	},
	{ },
};
/* DT compatible string -> port configuration mapping */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos7-spi",
			.data = (void *)&exynos7_spi_port_config,
	},
	{ .compatible = "samsung,exynos5433-spi",
			.data = (void *)&exynos5433_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
/* Platform driver glue: supports both DT and legacy board-file probing */
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");