qcom_glink_smem.c 8.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016, Linaro Ltd
  4. */
  5. #include <linux/io.h>
  6. #include <linux/module.h>
  7. #include <linux/of.h>
  8. #include <linux/of_address.h>
  9. #include <linux/of_irq.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/platform_device.h>
  12. #include <linux/mailbox_client.h>
  13. #include <linux/mfd/syscon.h>
  14. #include <linux/slab.h>
  15. #include <linux/rpmsg.h>
  16. #include <linux/idr.h>
  17. #include <linux/circ_buf.h>
  18. #include <linux/soc/qcom/smem.h>
  19. #include <linux/sizes.h>
  20. #include <linux/delay.h>
  21. #include <linux/regmap.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/list.h>
  24. #include <linux/rpmsg/qcom_glink.h>
  25. #include "qcom_glink_native.h"
/* Headroom kept in the TX fifo so head never catches up with tail. */
#define FIFO_FULL_RESERVE 8
/* Required alignment of the TX head index.
 * NOTE(review): the tx path uses the literal 8 (ALIGN(head, 8)) rather
 * than this macro — keep the two in sync.
 */
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */

/* SMEM item ids of the shared index descriptor and the two fifos. */
#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR 478
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0 479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1 480
/*
 * Per-edge state of a G-Link SMEM transport.
 */
struct qcom_glink_smem {
	struct device dev;		/* edge device, child of @parent's parent */
	int irq;			/* incoming kick interrupt from the remote */
	struct qcom_glink *glink;	/* handle from qcom_glink_native_probe() */
	struct mbox_client mbox_client;	/* client for the outgoing kick mailbox */
	struct mbox_chan *mbox_chan;	/* channel used by glink_smem_tx_kick() */
	u32 remote_pid;			/* SMEM host id of the remote processor */
};
/*
 * One unidirectional ring in SMEM. @tail and @head point into the shared
 * 4-word descriptor; @fifo is the data region (RX fifo is resolved lazily).
 */
struct glink_smem_pipe {
	struct qcom_glink_pipe native;	/* ops + length seen by the glink core */
	__le32 *tail;			/* read index, in shared memory */
	__le32 *head;			/* write index, in shared memory */
	void *fifo;			/* ring data, NULL until resolved (RX) */
	struct qcom_glink_smem *smem;	/* owning edge */
};

#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)
  48. static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
  49. {
  50. struct glink_smem_pipe *pipe = to_smem_pipe(np);
  51. struct qcom_glink_smem *smem = pipe->smem;
  52. size_t len;
  53. void *fifo;
  54. u32 head;
  55. u32 tail;
  56. if (!pipe->fifo) {
  57. fifo = qcom_smem_get(smem->remote_pid,
  58. SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
  59. if (IS_ERR(fifo)) {
  60. pr_err("failed to acquire RX fifo handle: %ld\n",
  61. PTR_ERR(fifo));
  62. return 0;
  63. }
  64. pipe->fifo = fifo;
  65. pipe->native.length = len;
  66. }
  67. head = le32_to_cpu(*pipe->head);
  68. tail = le32_to_cpu(*pipe->tail);
  69. if (head < tail)
  70. return pipe->native.length - tail + head;
  71. else
  72. return head - tail;
  73. }
  74. static void glink_smem_rx_peek(struct qcom_glink_pipe *np,
  75. void *data, unsigned int offset, size_t count)
  76. {
  77. struct glink_smem_pipe *pipe = to_smem_pipe(np);
  78. size_t len;
  79. u32 tail;
  80. tail = le32_to_cpu(*pipe->tail);
  81. tail += offset;
  82. if (tail >= pipe->native.length)
  83. tail -= pipe->native.length;
  84. len = min_t(size_t, count, pipe->native.length - tail);
  85. if (len)
  86. memcpy_fromio(data, pipe->fifo + tail, len);
  87. if (len != count)
  88. memcpy_fromio(data + len, pipe->fifo, (count - len));
  89. }
  90. static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
  91. size_t count)
  92. {
  93. struct glink_smem_pipe *pipe = to_smem_pipe(np);
  94. u32 tail;
  95. tail = le32_to_cpu(*pipe->tail);
  96. tail += count;
  97. if (tail >= pipe->native.length)
  98. tail -= pipe->native.length;
  99. *pipe->tail = cpu_to_le32(tail);
  100. }
  101. static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
  102. {
  103. struct glink_smem_pipe *pipe = to_smem_pipe(np);
  104. u32 head;
  105. u32 tail;
  106. u32 avail;
  107. head = le32_to_cpu(*pipe->head);
  108. tail = le32_to_cpu(*pipe->tail);
  109. if (tail <= head)
  110. avail = pipe->native.length - head + tail;
  111. else
  112. avail = tail - head;
  113. if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
  114. avail = 0;
  115. else
  116. avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
  117. return avail;
  118. }
  119. static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe,
  120. unsigned int head,
  121. const void *data, size_t count)
  122. {
  123. size_t len;
  124. len = min_t(size_t, count, pipe->native.length - head);
  125. if (len)
  126. memcpy(pipe->fifo + head, data, len);
  127. if (len != count)
  128. memcpy(pipe->fifo, data + len, count - len);
  129. head += count;
  130. if (head >= pipe->native.length)
  131. head -= pipe->native.length;
  132. return head;
  133. }
/*
 * glink_smem_tx_write() - write one message (header + payload) into the TX
 * ring and publish the new head index for the remote to consume.
 */
static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	unsigned int head;

	head = le32_to_cpu(*pipe->head);

	head = glink_smem_tx_write_one(pipe, head, hdr, hlen);
	head = glink_smem_tx_write_one(pipe, head, data, dlen);

	/* Ensure head is always aligned to 8 bytes */
	head = ALIGN(head, 8);
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	/* Ensure ordering of fifo and head update */
	wmb();

	/* Publishing the head makes the message visible to the remote. */
	*pipe->head = cpu_to_le32(head);
}
  151. static void glink_smem_tx_kick(struct qcom_glink_pipe *glink_pipe)
  152. {
  153. struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
  154. struct qcom_glink_smem *smem = pipe->smem;
  155. mbox_send_message(smem->mbox_chan, NULL);
  156. mbox_client_txdone(smem->mbox_chan, 0);
  157. }
  158. static irqreturn_t qcom_glink_smem_intr(int irq, void *data)
  159. {
  160. struct qcom_glink_smem *smem = data;
  161. qcom_glink_native_rx(smem->glink);
  162. return IRQ_HANDLED;
  163. }
/*
 * Device .release callback: frees the edge allocated in
 * qcom_glink_smem_register() once the last reference is dropped.
 */
static void qcom_glink_smem_release(struct device *dev)
{
	struct qcom_glink_smem *smem = container_of(dev, struct qcom_glink_smem, dev);

	kfree(smem);
}
  169. struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
  170. struct device_node *node)
  171. {
  172. struct glink_smem_pipe *rx_pipe;
  173. struct glink_smem_pipe *tx_pipe;
  174. struct qcom_glink_smem *smem;
  175. struct qcom_glink *glink;
  176. struct device *dev;
  177. u32 remote_pid;
  178. __le32 *descs;
  179. size_t size;
  180. int ret;
  181. smem = kzalloc(sizeof(*smem), GFP_KERNEL);
  182. if (!smem)
  183. return ERR_PTR(-ENOMEM);
  184. dev = &smem->dev;
  185. dev->parent = parent;
  186. dev->of_node = node;
  187. dev->release = qcom_glink_smem_release;
  188. dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
  189. ret = device_register(dev);
  190. if (ret) {
  191. pr_err("failed to register glink edge\n");
  192. put_device(dev);
  193. return ERR_PTR(ret);
  194. }
  195. ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
  196. &remote_pid);
  197. if (ret) {
  198. dev_err(dev, "failed to parse qcom,remote-pid\n");
  199. goto err_put_dev;
  200. }
  201. smem->remote_pid = remote_pid;
  202. rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
  203. tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
  204. if (!rx_pipe || !tx_pipe) {
  205. ret = -ENOMEM;
  206. goto err_put_dev;
  207. }
  208. ret = qcom_smem_alloc(remote_pid,
  209. SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
  210. if (ret && ret != -EEXIST) {
  211. dev_err(dev, "failed to allocate glink descriptors\n");
  212. goto err_put_dev;
  213. }
  214. descs = qcom_smem_get(remote_pid,
  215. SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
  216. if (IS_ERR(descs)) {
  217. dev_err(dev, "failed to acquire xprt descriptor\n");
  218. ret = PTR_ERR(descs);
  219. goto err_put_dev;
  220. }
  221. if (size != 32) {
  222. dev_err(dev, "glink descriptor of invalid size\n");
  223. ret = -EINVAL;
  224. goto err_put_dev;
  225. }
  226. tx_pipe->tail = &descs[0];
  227. tx_pipe->head = &descs[1];
  228. rx_pipe->tail = &descs[2];
  229. rx_pipe->head = &descs[3];
  230. ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
  231. SZ_16K);
  232. if (ret && ret != -EEXIST) {
  233. dev_err(dev, "failed to allocate TX fifo\n");
  234. goto err_put_dev;
  235. }
  236. tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
  237. &tx_pipe->native.length);
  238. if (IS_ERR(tx_pipe->fifo)) {
  239. dev_err(dev, "failed to acquire TX fifo\n");
  240. ret = PTR_ERR(tx_pipe->fifo);
  241. goto err_put_dev;
  242. }
  243. smem->irq = of_irq_get(smem->dev.of_node, 0);
  244. ret = devm_request_irq(&smem->dev, smem->irq, qcom_glink_smem_intr,
  245. IRQF_NO_SUSPEND | IRQF_NO_AUTOEN,
  246. "glink-smem", smem);
  247. if (ret) {
  248. dev_err(&smem->dev, "failed to request IRQ\n");
  249. goto err_put_dev;
  250. }
  251. smem->mbox_client.dev = &smem->dev;
  252. smem->mbox_client.knows_txdone = true;
  253. smem->mbox_chan = mbox_request_channel(&smem->mbox_client, 0);
  254. if (IS_ERR(smem->mbox_chan)) {
  255. ret = dev_err_probe(&smem->dev, PTR_ERR(smem->mbox_chan),
  256. "failed to acquire IPC channel\n");
  257. goto err_put_dev;
  258. }
  259. rx_pipe->smem = smem;
  260. rx_pipe->native.avail = glink_smem_rx_avail;
  261. rx_pipe->native.peek = glink_smem_rx_peek;
  262. rx_pipe->native.advance = glink_smem_rx_advance;
  263. tx_pipe->smem = smem;
  264. tx_pipe->native.avail = glink_smem_tx_avail;
  265. tx_pipe->native.write = glink_smem_tx_write;
  266. tx_pipe->native.kick = glink_smem_tx_kick;
  267. *rx_pipe->tail = 0;
  268. *tx_pipe->head = 0;
  269. glink = qcom_glink_native_probe(dev,
  270. GLINK_FEATURE_INTENT_REUSE,
  271. &rx_pipe->native, &tx_pipe->native,
  272. false);
  273. if (IS_ERR(glink)) {
  274. ret = PTR_ERR(glink);
  275. goto err_free_mbox;
  276. }
  277. smem->glink = glink;
  278. enable_irq(smem->irq);
  279. return smem;
  280. err_free_mbox:
  281. mbox_free_channel(smem->mbox_chan);
  282. err_put_dev:
  283. device_unregister(dev);
  284. return ERR_PTR(ret);
  285. }
  286. EXPORT_SYMBOL_GPL(qcom_glink_smem_register);
/**
 * qcom_glink_smem_unregister() - tear down an edge created by
 * qcom_glink_smem_register()
 * @smem: edge handle
 */
void qcom_glink_smem_unregister(struct qcom_glink_smem *smem)
{
	struct qcom_glink *glink = smem->glink;

	/* Quiesce incoming kicks before removing the glink instance */
	disable_irq(smem->irq);

	qcom_glink_native_remove(glink);

	mbox_free_channel(smem->mbox_chan);
	device_unregister(&smem->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);
  296. MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
  297. MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver");
  298. MODULE_LICENSE("GPL v2");