rmi_spi.c

/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT	32
enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};
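
/*
 * rmi_spi_manage_pools() - (re)size the driver's transfer buffers.
 *
 * A single DMA-capable allocation holds both the rx and tx byte buffers
 * (tx_buf points at the second half). A second allocation holds a pool of
 * spi_transfer structures: when the platform data requests a per-byte read
 * or write delay, one spi_transfer per byte is reserved so each byte can
 * carry its own delay; otherwise a single transfer per direction is enough.
 */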
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kcalloc(&spi->dev, buf_size, 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kcalloc(&spi->dev,
		rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count,
		sizeof(struct spi_transfer),
		GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}
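
/*
 * rmi_spi_xfer() - run one RMI command on the bus.
 *
 * The two- or four-byte command header selected by cmd->op is placed in
 * tx_buf, any payload is copied after it, and the tx and (optional) rx
 * spi_transfers are chained into a single spi_message for spi_sync(). With
 * per-byte delays configured, every byte becomes its own one-byte transfer
 * carrying that delay.
 */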
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}
/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page, which lets us reliably
 * address the device in windows of 256 registers.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	/* Only cache the new page if the page select write succeeded. */
	if (!ret)
		rmi_spi->page = page;

	return ret;
}
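
/*
 * rmi_spi_write_block()/rmi_spi_read_block() implement the transport ops.
 * Both hold page_mutex for the duration of the access and only issue a page
 * select when the target address lies outside the currently cached page.
 */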
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}
static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};
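
/*
 * Sketch of a device tree node this driver would bind to. The compatible
 * string and the spi-rx-delay-us/spi-tx-delay-us properties come from the
 * code below; the remaining properties (reg, spi-max-frequency, interrupts)
 * are generic SPI/interrupt bindings, and all values shown are illustrative
 * only.
 *
 *	rmi4-spi@0 {
 *		compatible = "syna,rmi4-spi";
 *		reg = <0>;
 *		spi-max-frequency = <4000000>;
 *		interrupt-parent = <&gpio2>;
 *		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
 *		spi-rx-delay-us = <30>;
 *		spi-tx-delay-us = <30>;
 *	};
 */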
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif
static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}
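
/*
 * Probe sequence: reject half-duplex controllers, pull configuration from
 * either the device tree node or legacy platform data, apply any requested
 * SPI mode and word size via spi_setup(), allocate the transfer buffers,
 * write page select 0 to confirm the device responds, and finally register
 * the transport with the RMI core (with a devm action to unregister it
 * again on teardown).
 */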
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int error;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		error = rmi_spi_of_probe(spi, pdata);
		if (error)
			return error;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	error = spi_setup(spi);
	if (error < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return error;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (error)
		return error;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	error = rmi_set_page(rmi_spi, 0);
	if (error) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return error;
	}

	dev_info(&spi->dev, "registering SPI-connected sensor\n");

	error = rmi_register_transport_device(&rmi_spi->xport);
	if (error) {
		dev_err(&spi->dev, "failed to register sensor: %d\n", error);
		return error;
	}

	error = devm_add_action_or_reset(&spi->dev,
					 rmi_spi_unregister_transport,
					 rmi_spi);
	if (error)
		return error;

	return 0;
}
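
/*
 * Both sets of PM hooks simply forward to the RMI core: rmi_driver_suspend()
 * and rmi_driver_resume() are called with their second argument set to true
 * for system sleep and false for runtime PM.
 */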
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif
#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif
static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");