dma.c

// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
 * v1.x of the spec and v2.0 will likely be split out.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"

/*
 * Software Parameter Values (somewhat arbitrary for now).
 * Some of them could be determined at run time eventually.
 */

#define XFER_RINGS			1	/* max: 8 */
#define XFER_RING_ENTRIES		16	/* max: 255 */

#define IBI_RINGS			1	/* max: 8 */
#define IBI_STATUS_RING_ENTRIES		32	/* max: 255 */
#define IBI_CHUNK_CACHELINES		1	/* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE		128	/* max: 1023 */

/*
 * Ring Header Preamble
 */

#define rhs_reg_read(r)		readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v)	writel(v, hci->RHS_regs + (RHS_##r))

#define RHS_CONTROL			0x00
#define PREAMBLE_SIZE			GENMASK(31, 24)	/* Preamble Section Size */
#define HEADER_SIZE			GENMASK(23, 16)	/* Ring Header Size */
#define MAX_HEADER_COUNT_CAP		GENMASK(7, 4)	/* HC Max Header Count */
#define MAX_HEADER_COUNT		GENMASK(3, 0)	/* Driver Max Header Count */

#define RHS_RHn_OFFSET(n)		(0x04 + (n) * 4)

/*
 * Ring Header (Per-Ring Bundle)
 */

#define rh_reg_read(r)		readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v)	writel(v, rh->regs + (RH_##r))

#define RH_CR_SETUP			0x00	/* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE		GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE		GENMASK(23, 16)
#define CR_RING_SIZE			GENMASK(8, 0)

#define RH_IBI_SETUP			0x04
#define IBI_STATUS_STRUCT_SIZE		GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE		GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE		GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT		GENMASK(9, 0)

#define RH_CHUNK_CONTROL		0x08

#define RH_INTR_STATUS			0x10
#define RH_INTR_STATUS_ENABLE		0x14
#define RH_INTR_SIGNAL_ENABLE		0x18
#define RH_INTR_FORCE			0x1c
#define INTR_IBI_READY			BIT(12)
#define INTR_TRANSFER_COMPLETION	BIT(11)
#define INTR_RING_OP			BIT(10)
#define INTR_TRANSFER_ERR		BIT(9)
#define INTR_WARN_INS_STOP_MODE		BIT(7)
#define INTR_IBI_RING_FULL		BIT(6)
#define INTR_TRANSFER_ABORT		BIT(5)

#define RH_RING_STATUS			0x20
#define RING_STATUS_LOCKED		BIT(3)
#define RING_STATUS_ABORTED		BIT(2)
#define RING_STATUS_RUNNING		BIT(1)
#define RING_STATUS_ENABLED		BIT(0)

#define RH_RING_CONTROL			0x24
#define RING_CTRL_ABORT			BIT(2)
#define RING_CTRL_RUN_STOP		BIT(1)
#define RING_CTRL_ENABLE		BIT(0)

#define RH_RING_OPERATION1		0x28
#define RING_OP1_IBI_DEQ_PTR		GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR		GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR		GENMASK(7, 0)

#define RH_RING_OPERATION2		0x2c
#define RING_OP2_IBI_ENQ_PTR		GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR		GENMASK(7, 0)

#define RH_CMD_RING_BASE_LO		0x30
#define RH_CMD_RING_BASE_HI		0x34
#define RH_RESP_RING_BASE_LO		0x38
#define RH_RESP_RING_BASE_HI		0x3c
#define RH_IBI_STATUS_RING_BASE_LO	0x40
#define RH_IBI_STATUS_RING_BASE_HI	0x44
#define RH_IBI_DATA_RING_BASE_LO	0x48
#define RH_IBI_DATA_RING_BASE_HI	0x4c

#define RH_CMD_RING_SG			0x50	/* Ring Scatter Gather Support */
#define RH_RESP_RING_SG			0x54
#define RH_IBI_STATUS_RING_SG		0x58
#define RH_IBI_DATA_RING_SG		0x5c
#define RING_SG_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE		GENMASK(15, 0)

/*
 * Data Buffer Descriptor (in memory)
 */

#define DATA_BUF_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define DATA_BUF_IOC			BIT(30)	/* Interrupt on Completion */
#define DATA_BUF_BLOCK_SIZE		GENMASK(15, 0)
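
/*
 * Per-ring software state: MMIO base of the ring's register bundle, CPU
 * and DMA addresses of the command/response and IBI rings, ring geometry,
 * dequeue bookkeeping pointers, and a lock serializing updates to the
 * RING_OPERATION1 register shared by the enqueue and dequeue paths.
 */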
struct hci_rh_data {
	void __iomem *regs;
	void *xfer, *resp, *ibi_status, *ibi_data;
	dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
	unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
	unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
	unsigned int done_ptr, ibi_chunk_ptr;
	struct hci_xfer **src_xfers;
	spinlock_t lock;
	struct completion op_done;
};

struct hci_rings_data {
	unsigned int total;
	struct hci_rh_data headers[] __counted_by(total);
};

struct hci_dma_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};
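
/*
 * Quiesce and tear down all rings: disable interrupts and ring operation,
 * then release the DMA rings and IBI buffers set up by hci_dma_init().
 * Safe to call on partially initialized state (used on init error paths).
 */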
static void hci_dma_cleanup(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i;

	if (!rings)
		return;

	for (i = 0; i < rings->total; i++) {
		rh = &rings->headers[i];

		rh_reg_write(INTR_SIGNAL_ENABLE, 0);
		rh_reg_write(RING_CONTROL, 0);
		rh_reg_write(CR_SETUP, 0);
		rh_reg_write(IBI_SETUP, 0);

		if (rh->xfer)
			dma_free_coherent(&hci->master.dev,
					  rh->xfer_struct_sz * rh->xfer_entries,
					  rh->xfer, rh->xfer_dma);
		if (rh->resp)
			dma_free_coherent(&hci->master.dev,
					  rh->resp_struct_sz * rh->xfer_entries,
					  rh->resp, rh->resp_dma);
		kfree(rh->src_xfers);
		if (rh->ibi_status)
			dma_free_coherent(&hci->master.dev,
					  rh->ibi_status_sz * rh->ibi_status_entries,
					  rh->ibi_status, rh->ibi_status_dma);
		if (rh->ibi_data_dma)
			dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
					 rh->ibi_chunk_sz * rh->ibi_chunks_total,
					 DMA_FROM_DEVICE);
		kfree(rh->ibi_data);
	}

	rhs_reg_write(CONTROL, 0);

	kfree(rings);
	hci->io_data = NULL;
}
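
/*
 * Discover how many ring bundles the controller exposes, cap that to the
 * driver's XFER_RINGS limit, then allocate and program the command/response
 * rings and, for the first IBI_RINGS rings, the IBI status and data rings,
 * before enabling and starting each ring.
 */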
static int hci_dma_init(struct i3c_hci *hci)
{
	struct hci_rings_data *rings;
	struct hci_rh_data *rh;
	u32 regval;
	unsigned int i, nr_rings, xfers_sz, resps_sz;
	unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
	int ret;

	regval = rhs_reg_read(CONTROL);
	nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
	dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
	if (unlikely(nr_rings > 8)) {
		dev_err(&hci->master.dev, "number of rings should be <= 8\n");
		nr_rings = 8;
	}
	if (nr_rings > XFER_RINGS)
		nr_rings = XFER_RINGS;
	rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;
	hci->io_data = rings;
	rings->total = nr_rings;

	regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
	rhs_reg_write(CONTROL, regval);

	for (i = 0; i < rings->total; i++) {
		u32 offset = rhs_reg_read(RHn_OFFSET(i));

		dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
		ret = -EINVAL;
		if (!offset)
			goto err_out;
		rh = &rings->headers[i];
		rh->regs = hci->base_regs + offset;
		spin_lock_init(&rh->lock);
		init_completion(&rh->op_done);

		rh->xfer_entries = XFER_RING_ENTRIES;

		regval = rh_reg_read(CR_SETUP);
		rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
		rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
		DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
		    rh->xfer_struct_sz, rh->resp_struct_sz);
		xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
		resps_sz = rh->resp_struct_sz * rh->xfer_entries;

		rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
					      &rh->xfer_dma, GFP_KERNEL);
		rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
					      &rh->resp_dma, GFP_KERNEL);
		rh->src_xfers =
			kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
				      GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->xfer || !rh->resp || !rh->src_xfers)
			goto err_out;

		rh_reg_write(CMD_RING_BASE_LO, lower_32_bits(rh->xfer_dma));
		rh_reg_write(CMD_RING_BASE_HI, upper_32_bits(rh->xfer_dma));
		rh_reg_write(RESP_RING_BASE_LO, lower_32_bits(rh->resp_dma));
		rh_reg_write(RESP_RING_BASE_HI, upper_32_bits(rh->resp_dma));

		regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
		rh_reg_write(CR_SETUP, regval);

		rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
		rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
						 INTR_TRANSFER_COMPLETION |
						 INTR_RING_OP |
						 INTR_TRANSFER_ERR |
						 INTR_WARN_INS_STOP_MODE |
						 INTR_IBI_RING_FULL |
						 INTR_TRANSFER_ABORT);

		/* IBIs */

		if (i >= IBI_RINGS)
			goto ring_ready;

		regval = rh_reg_read(IBI_SETUP);
		rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
		rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
		rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;

		rh->ibi_chunk_sz = dma_get_cache_alignment();
		rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
		/*
		 * Round IBI data chunk size to number of bytes supported by
		 * the HW. Chunk size can be 2^n number of DWORDs which is the
		 * same as 2^(n+2) bytes, where n is 0..6.
		 */
		rh->ibi_chunk_sz = umax(4, rh->ibi_chunk_sz);
		rh->ibi_chunk_sz = roundup_pow_of_two(rh->ibi_chunk_sz);
		if (rh->ibi_chunk_sz > 256) {
			ret = -EINVAL;
			goto err_out;
		}

		ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
		ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;

		rh->ibi_status =
			dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
					   &rh->ibi_status_dma, GFP_KERNEL);
		rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->ibi_status || !rh->ibi_data)
			goto err_out;
		rh->ibi_data_dma =
			dma_map_single(&hci->master.dev, rh->ibi_data,
				       ibi_data_ring_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
			rh->ibi_data_dma = 0;
			ret = -ENOMEM;
			goto err_out;
		}

		rh_reg_write(IBI_STATUS_RING_BASE_LO, lower_32_bits(rh->ibi_status_dma));
		rh_reg_write(IBI_STATUS_RING_BASE_HI, upper_32_bits(rh->ibi_status_dma));
		rh_reg_write(IBI_DATA_RING_BASE_LO, lower_32_bits(rh->ibi_data_dma));
		rh_reg_write(IBI_DATA_RING_BASE_HI, upper_32_bits(rh->ibi_data_dma));

		regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
				    rh->ibi_status_entries) |
			 FIELD_PREP(IBI_DATA_CHUNK_SIZE,
				    ilog2(rh->ibi_chunk_sz) - 2) |
			 FIELD_PREP(IBI_DATA_CHUNK_COUNT,
				    rh->ibi_chunks_total);
		rh_reg_write(IBI_SETUP, regval);

		regval = rh_reg_read(INTR_SIGNAL_ENABLE);
		regval |= INTR_IBI_READY;
		rh_reg_write(INTR_SIGNAL_ENABLE, regval);

ring_ready:
		rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
					   RING_CTRL_RUN_STOP);
	}

	return 0;

err_out:
	hci_dma_cleanup(hci);
	return ret;
}
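
/*
 * Undo the streaming DMA mappings of the first n entries of xfer_list.
 * Entries without a data buffer are skipped.
 */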
static void hci_dma_unmap_xfer(struct i3c_hci *hci,
			       struct hci_xfer *xfer_list, unsigned int n)
{
	struct hci_xfer *xfer;
	unsigned int i;

	for (i = 0; i < n; i++) {
		xfer = xfer_list + i;
		if (!xfer->data)
			continue;
		dma_unmap_single(&hci->master.dev,
				 xfer->data_dma, xfer->data_len,
				 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
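
/*
 * Enqueue n transfers on ring 0: write each command descriptor and its
 * data buffer descriptor into the ring, map data buffers for DMA, then
 * publish the new enqueue pointer to the hardware in one locked update.
 * Returns -EBUSY if the ring would overflow, -ENOMEM on mapping failure.
 */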
static int hci_dma_queue_xfer(struct i3c_hci *hci,
			      struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i, ring, enqueue_ptr;
	u32 op1_val, op2_val;
	void *buf;

	/* For now we only use ring 0 */
	ring = 0;
	rh = &rings->headers[ring];

	op1_val = rh_reg_read(RING_OPERATION1);
	enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;

		/* store cmd descriptor */
		*ring_data++ = xfer->cmd_desc[0];
		*ring_data++ = xfer->cmd_desc[1];
		if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
			*ring_data++ = xfer->cmd_desc[2];
			*ring_data++ = xfer->cmd_desc[3];
		}

		/* first word of Data Buffer Descriptor Structure */
		if (!xfer->data)
			xfer->data_len = 0;
		*ring_data++ =
			FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
			((i == n - 1) ? DATA_BUF_IOC : 0);

		/* 2nd and 3rd words of Data Buffer Descriptor Structure */
		if (xfer->data) {
			buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
			xfer->data_dma =
				dma_map_single(&hci->master.dev,
					       buf,
					       xfer->data_len,
					       xfer->rnw ?
						  DMA_FROM_DEVICE :
						  DMA_TO_DEVICE);
			if (dma_mapping_error(&hci->master.dev,
					      xfer->data_dma)) {
				hci_dma_unmap_xfer(hci, xfer_list, i);
				return -ENOMEM;
			}
			*ring_data++ = lower_32_bits(xfer->data_dma);
			*ring_data++ = upper_32_bits(xfer->data_dma);
		} else {
			*ring_data++ = 0;
			*ring_data++ = 0;
		}

		/* remember corresponding xfer struct */
		rh->src_xfers[enqueue_ptr] = xfer;
		/* remember corresponding ring/entry for this xfer structure */
		xfer->ring_number = ring;
		xfer->ring_entry = enqueue_ptr;
		enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;

		/*
		 * We may update the hardware view of the enqueue pointer
		 * only if we didn't reach its dequeue pointer.
		 */
		op2_val = rh_reg_read(RING_OPERATION2);
		if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
			/* the ring is full */
			hci_dma_unmap_xfer(hci, xfer_list, i + 1);
			return -EBUSY;
		}
	}

	/* take care to update the hardware enqueue pointer atomically */
	spin_lock_irq(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_ENQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock_irq(&rh->lock);

	return 0;
}
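
/*
 * Abort the ring, replace any still-pending descriptors from xfer_list
 * with no-op commands, unmap their buffers, then restart the ring.
 * Returns true if at least one transfer had to be unqueued.
 */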
static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
				 struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
	unsigned int i;
	bool did_unqueue = false;

	/* stop the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
	if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
		/*
		 * We're deep in trouble if this condition is ever met.
		 * Hardware might still be writing to memory, etc.
		 */
		dev_crit(&hci->master.dev, "unable to abort the ring\n");
		WARN_ON(1);
	}

	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		int idx = xfer->ring_entry;

		/*
		 * At the time the abort happened, the xfer might have
		 * completed already. If not then replace corresponding
		 * descriptor entries with a no-op.
		 */
		if (idx >= 0) {
			u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;

			/* store no-op cmd descriptor */
			*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
			*ring_data++ = 0;
			if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
				*ring_data++ = 0;
				*ring_data++ = 0;
			}

			/* disassociate this xfer struct */
			rh->src_xfers[idx] = NULL;

			/* and unmap it */
			hci_dma_unmap_xfer(hci, xfer, 1);

			did_unqueue = true;
		}
	}

	/* restart the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);

	return did_unqueue;
}
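
/*
 * Completion handling: walk response descriptors from the software done
 * pointer up to the hardware dequeue pointer, match each response to its
 * originating xfer via src_xfers[], unmap buffers and signal completion,
 * then publish the new software dequeue pointer.
 */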
static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	u32 op1_val, op2_val, resp, *ring_resp;
	unsigned int tid, done_ptr = rh->done_ptr;
	struct hci_xfer *xfer;

	for (;;) {
		op2_val = rh_reg_read(RING_OPERATION2);
		if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
			break;

		ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
		resp = *ring_resp;
		tid = RESP_TID(resp);
		DBG("resp = 0x%08x", resp);

		xfer = rh->src_xfers[done_ptr];
		if (!xfer) {
			DBG("orphaned ring entry");
		} else {
			hci_dma_unmap_xfer(hci, xfer, 1);
			xfer->ring_entry = -1;
			xfer->response = resp;
			if (tid != xfer->cmd_tid) {
				dev_err(&hci->master.dev,
					"response tid=%d when expecting %d\n",
					tid, xfer->cmd_tid);
				/* TODO: do something about it? */
			}
			if (xfer->completion)
				complete(xfer->completion);
		}

		done_ptr = (done_ptr + 1) % rh->xfer_entries;
		rh->done_ptr = done_ptr;
	}

	/* take care to update the software dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);
}
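
/*
 * Allocate per-device IBI state: a generic slot pool sized per @req plus
 * the maximum payload length used to sanity-check incoming IBI sizes.
 */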
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_dma_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}
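
/*
 * Gather a complete IBI from the status ring (one or more status entries
 * up to LAST_STATUS), copy its payload out of the circular data chunk ring
 * into a free pool slot, queue the slot to the core, then return the
 * consumed status entries and data chunks to the hardware.
 */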
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_dma_dev_ibi_data *dev_ibi;
	struct i3c_ibi_slot *slot;
	u32 op1_val, op2_val, ibi_status_error;
	unsigned int ptr, enq_ptr, deq_ptr;
	unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
	int ibi_addr, last_ptr;
	void *ring_ibi_data;
	dma_addr_t ring_ibi_data_dma;

	op1_val = rh_reg_read(RING_OPERATION1);
	deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

	op2_val = rh_reg_read(RING_OPERATION2);
	enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

	ibi_status_error = 0;
	ibi_addr = -1;
	ibi_chunks = 0;
	ibi_size = 0;
	last_ptr = -1;

	/* let's find all we can about this IBI */
	for (ptr = deq_ptr; ptr != enq_ptr;
	     ptr = (ptr + 1) % rh->ibi_status_entries) {
		u32 ibi_status, *ring_ibi_status;
		unsigned int chunks;

		ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
		ibi_status = *ring_ibi_status;
		DBG("status = %#x", ibi_status);

		if (ibi_status_error) {
			/* we no longer care */
		} else if (ibi_status & IBI_ERROR) {
			ibi_status_error = ibi_status;
		} else if (ibi_addr == -1) {
			ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		} else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
			/* the address changed unexpectedly */
			ibi_status_error = ibi_status;
		}

		chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
		ibi_chunks += chunks;
		if (!(ibi_status & IBI_LAST_STATUS)) {
			ibi_size += chunks * rh->ibi_chunk_sz;
		} else {
			ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
			last_ptr = ptr;
			break;
		}
	}

	/* validate what we've got */

	if (last_ptr == -1) {
		/* this IBI sequence is not yet complete */
		DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
		return;
	}
	deq_ptr = last_ptr + 1;
	deq_ptr %= rh->ibi_status_entries;

	if (ibi_status_error) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
		goto done;
	}

	/* determine who this is for */
	dev = i3c_hci_addr_to_dev(hci, ibi_addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi_addr);
		goto done;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	if (ibi_size > dev_ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi_size, dev_ibi->max_len);
		goto done;
	}

	/*
	 * This ring model is not suitable for zero-copy processing of IBIs.
	 * We have the data chunk ring wrap-around to deal with, meaning
	 * that the payload might span multiple chunks beginning at the
	 * end of the ring and wrap to the start of the ring. Furthermore
	 * there is no guarantee that those chunks will be released in order
	 * and in a timely manner by the upper driver. So let's just copy
	 * them to a discrete buffer. In practice they're supposed to be
	 * small anyway.
	 */
	slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
		goto done;
	}

	/* copy first part of the payload */
	ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
	ring_ibi_data = rh->ibi_data + ibi_data_offset;
	ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
	first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
		     * rh->ibi_chunk_sz;
	if (first_part > ibi_size)
		first_part = ibi_size;
	dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
				first_part, DMA_FROM_DEVICE);
	memcpy(slot->data, ring_ibi_data, first_part);

	/* copy second part if any */
	if (ibi_size > first_part) {
		/* we wrap back to the start and copy remaining data */
		ring_ibi_data = rh->ibi_data;
		ring_ibi_data_dma = rh->ibi_data_dma;
		dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
					ibi_size - first_part, DMA_FROM_DEVICE);
		memcpy(slot->data + first_part, ring_ibi_data,
		       ibi_size - first_part);
	}

	/* submit it */
	slot->dev = dev;
	slot->len = ibi_size;
	i3c_master_queue_ibi(dev, slot);

done:
	/* take care to update the ibi dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_IBI_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);

	/* update the chunk pointer */
	rh->ibi_chunk_ptr += ibi_chunks;
	rh->ibi_chunk_ptr %= rh->ibi_chunks_total;

	/* and tell the hardware about freed chunks */
	rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}
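
/*
 * Ring interrupt dispatch: @mask has one bit per ring. For each flagged
 * ring, acknowledge its INTR_STATUS bits and fan out to IBI processing,
 * transfer completion, ring-op completion and error reporting.
 */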
static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
{
	struct hci_rings_data *rings = hci->io_data;
	unsigned int i;
	bool handled = false;

	for (i = 0; mask && i < rings->total; i++) {
		struct hci_rh_data *rh;
		u32 status;

		if (!(mask & BIT(i)))
			continue;
		mask &= ~BIT(i);

		rh = &rings->headers[i];
		status = rh_reg_read(INTR_STATUS);
		DBG("rh%d status: %#x", i, status);
		if (!status)
			continue;
		rh_reg_write(INTR_STATUS, status);

		if (status & INTR_IBI_READY)
			hci_dma_process_ibi(hci, rh);
		if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
			hci_dma_xfer_done(hci, rh);
		if (status & INTR_RING_OP)
			complete(&rh->op_done);

		if (status & INTR_TRANSFER_ABORT) {
			u32 ring_status;

			dev_notice_ratelimited(&hci->master.dev,
					       "ring %d: Transfer Aborted\n", i);
			mipi_i3c_hci_resume(hci);
			ring_status = rh_reg_read(RING_STATUS);
			if (!(ring_status & RING_STATUS_RUNNING) &&
			    status & INTR_TRANSFER_COMPLETION &&
			    status & INTR_TRANSFER_ERR) {
				/*
				 * Ring stop followed by run is an Intel
				 * specific required quirk after resuming the
				 * halted controller. Do it only when the ring
				 * is not in running state after a transfer
				 * error.
				 */
				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
							   RING_CTRL_RUN_STOP);
			}
		}
		if (status & INTR_WARN_INS_STOP_MODE)
			dev_warn_ratelimited(&hci->master.dev,
					     "ring %d: Inserted Stop on Mode Change\n", i);
		if (status & INTR_IBI_RING_FULL)
			dev_err_ratelimited(&hci->master.dev,
					    "ring %d: IBI Ring Full Condition\n", i);

		handled = true;
	}

	return handled;
}

const struct hci_io_ops mipi_i3c_hci_dma = {
	.init			= hci_dma_init,
	.cleanup		= hci_dma_cleanup,
	.queue_xfer		= hci_dma_queue_xfer,
	.dequeue_xfer		= hci_dma_dequeue_xfer,
	.irq_handler		= hci_dma_irq_handler,
	.request_ibi		= hci_dma_request_ibi,
	.free_ibi		= hci_dma_free_ibi,
	.recycle_ibi_slot	= hci_dma_recycle_ibi_slot,
};