/* drivers/firmware/tegra/ivc.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 */
  5. #include <soc/tegra/ivc.h>
  6. #define TEGRA_IVC_ALIGN 64
  7. /*
  8. * IVC channel reset protocol.
  9. *
  10. * Each end uses its tx_channel.state to indicate its synchronization state.
  11. */
  12. enum tegra_ivc_state {
  13. /*
  14. * This value is zero for backwards compatibility with services that
  15. * assume channels to be initially zeroed. Such channels are in an
  16. * initially valid state, but cannot be asynchronously reset, and must
  17. * maintain a valid state at all times.
  18. *
  19. * The transmitting end can enter the established state from the sync or
  20. * ack state when it observes the receiving endpoint in the ack or
  21. * established state, indicating that has cleared the counters in our
  22. * rx_channel.
  23. */
  24. TEGRA_IVC_STATE_ESTABLISHED = 0,
  25. /*
  26. * If an endpoint is observed in the sync state, the remote endpoint is
  27. * allowed to clear the counters it owns asynchronously with respect to
  28. * the current endpoint. Therefore, the current endpoint is no longer
  29. * allowed to communicate.
  30. */
  31. TEGRA_IVC_STATE_SYNC,
  32. /*
  33. * When the transmitting end observes the receiving end in the sync
  34. * state, it can clear the w_count and r_count and transition to the ack
  35. * state. If the remote endpoint observes us in the ack state, it can
  36. * return to the established state once it has cleared its counters.
  37. */
  38. TEGRA_IVC_STATE_ACK
  39. };
  40. /*
  41. * This structure is divided into two-cache aligned parts, the first is only
  42. * written through the tx.channel pointer, while the second is only written
  43. * through the rx.channel pointer. This delineates ownership of the cache
  44. * lines, which is critical to performance and necessary in non-cache coherent
  45. * implementations.
  46. */
  47. struct tegra_ivc_header {
  48. union {
  49. struct {
  50. /* fields owned by the transmitting end */
  51. u32 count;
  52. u32 state;
  53. };
  54. u8 pad[TEGRA_IVC_ALIGN];
  55. } tx;
  56. union {
  57. /* fields owned by the receiving end */
  58. u32 count;
  59. u8 pad[TEGRA_IVC_ALIGN];
  60. } rx;
  61. };
  62. #define tegra_ivc_header_read_field(hdr, field) \
  63. iosys_map_rd_field(hdr, 0, struct tegra_ivc_header, field)
  64. #define tegra_ivc_header_write_field(hdr, field, value) \
  65. iosys_map_wr_field(hdr, 0, struct tegra_ivc_header, field, value)
  66. static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
  67. {
  68. if (!ivc->peer)
  69. return;
  70. dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
  71. DMA_FROM_DEVICE);
  72. }
  73. static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
  74. {
  75. if (!ivc->peer)
  76. return;
  77. dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
  78. DMA_TO_DEVICE);
  79. }
  80. static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, struct iosys_map *map)
  81. {
  82. /*
  83. * This function performs multiple checks on the same values with
  84. * security implications, so create snapshots with READ_ONCE() to
  85. * ensure that these checks use the same values.
  86. */
  87. u32 tx = tegra_ivc_header_read_field(map, tx.count);
  88. u32 rx = tegra_ivc_header_read_field(map, rx.count);
  89. /*
  90. * Perform an over-full check to prevent denial of service attacks
  91. * where a server could be easily fooled into believing that there's
  92. * an extremely large number of frames ready, since receivers are not
  93. * expected to check for full or over-full conditions.
  94. *
  95. * Although the channel isn't empty, this is an invalid case caused by
  96. * a potentially malicious peer, so returning empty is safer, because
  97. * it gives the impression that the channel has gone silent.
  98. */
  99. if (tx - rx > ivc->num_frames)
  100. return true;
  101. return tx == rx;
  102. }
  103. static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct iosys_map *map)
  104. {
  105. u32 tx = tegra_ivc_header_read_field(map, tx.count);
  106. u32 rx = tegra_ivc_header_read_field(map, rx.count);
  107. /*
  108. * Invalid cases where the counters indicate that the queue is over
  109. * capacity also appear full.
  110. */
  111. return tx - rx >= ivc->num_frames;
  112. }
  113. static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct iosys_map *map)
  114. {
  115. u32 tx = tegra_ivc_header_read_field(map, tx.count);
  116. u32 rx = tegra_ivc_header_read_field(map, rx.count);
  117. /*
  118. * This function isn't expected to be used in scenarios where an
  119. * over-full situation can lead to denial of service attacks. See the
  120. * comment in tegra_ivc_empty() for an explanation about special
  121. * over-full considerations.
  122. */
  123. return tx - rx;
  124. }
  125. static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
  126. {
  127. unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count);
  128. tegra_ivc_header_write_field(&ivc->tx.map, tx.count, count + 1);
  129. if (ivc->tx.position == ivc->num_frames - 1)
  130. ivc->tx.position = 0;
  131. else
  132. ivc->tx.position++;
  133. }
  134. static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
  135. {
  136. unsigned int count = tegra_ivc_header_read_field(&ivc->rx.map, rx.count);
  137. tegra_ivc_header_write_field(&ivc->rx.map, rx.count, count + 1);
  138. if (ivc->rx.position == ivc->num_frames - 1)
  139. ivc->rx.position = 0;
  140. else
  141. ivc->rx.position++;
  142. }
  143. static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
  144. {
  145. unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
  146. unsigned int state;
  147. /*
  148. * tx.channel->state is set locally, so it is not synchronized with
  149. * state from the remote peer. The remote peer cannot reset its
  150. * transmit counters until we've acknowledged its synchronization
  151. * request, so no additional synchronization is required because an
  152. * asynchronous transition of rx.channel->state to
  153. * TEGRA_IVC_STATE_ACK is not allowed.
  154. */
  155. state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
  156. if (state != TEGRA_IVC_STATE_ESTABLISHED)
  157. return -ECONNRESET;
  158. /*
  159. * Avoid unnecessary invalidations when performing repeated accesses
  160. * to an IVC channel by checking the old queue pointers first.
  161. *
  162. * Synchronization is only necessary when these pointers indicate
  163. * empty or full.
  164. */
  165. if (!tegra_ivc_empty(ivc, &ivc->rx.map))
  166. return 0;
  167. tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
  168. if (tegra_ivc_empty(ivc, &ivc->rx.map))
  169. return -ENOSPC;
  170. return 0;
  171. }
  172. static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
  173. {
  174. unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
  175. unsigned int state;
  176. state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
  177. if (state != TEGRA_IVC_STATE_ESTABLISHED)
  178. return -ECONNRESET;
  179. if (!tegra_ivc_full(ivc, &ivc->tx.map))
  180. return 0;
  181. tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
  182. if (tegra_ivc_full(ivc, &ivc->tx.map))
  183. return -ENOSPC;
  184. return 0;
  185. }
  186. static int tegra_ivc_frame_virt(struct tegra_ivc *ivc, const struct iosys_map *header,
  187. unsigned int frame, struct iosys_map *map)
  188. {
  189. size_t offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
  190. if (WARN_ON(frame >= ivc->num_frames))
  191. return -EINVAL;
  192. *map = IOSYS_MAP_INIT_OFFSET(header, offset);
  193. return 0;
  194. }
  195. static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
  196. dma_addr_t phys,
  197. unsigned int frame)
  198. {
  199. unsigned long offset;
  200. offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
  201. return phys + offset;
  202. }
  203. static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
  204. dma_addr_t phys,
  205. unsigned int frame,
  206. unsigned int offset,
  207. size_t size)
  208. {
  209. if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
  210. return;
  211. phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
  212. dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
  213. }
  214. static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
  215. dma_addr_t phys,
  216. unsigned int frame,
  217. unsigned int offset,
  218. size_t size)
  219. {
  220. if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
  221. return;
  222. phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
  223. dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
  224. }
  225. /* directly peek at the next frame rx'ed */
  226. int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
  227. {
  228. int err;
  229. if (WARN_ON(ivc == NULL))
  230. return -EINVAL;
  231. err = tegra_ivc_check_read(ivc);
  232. if (err < 0)
  233. return err;
  234. /*
  235. * Order observation of ivc->rx.position potentially indicating new
  236. * data before data read.
  237. */
  238. smp_rmb();
  239. tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
  240. ivc->frame_size);
  241. return tegra_ivc_frame_virt(ivc, &ivc->rx.map, ivc->rx.position, map);
  242. }
  243. EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
  244. int tegra_ivc_read_advance(struct tegra_ivc *ivc)
  245. {
  246. unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
  247. unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
  248. int err;
  249. /*
  250. * No read barriers or synchronization here: the caller is expected to
  251. * have already observed the channel non-empty. This check is just to
  252. * catch programming errors.
  253. */
  254. err = tegra_ivc_check_read(ivc);
  255. if (err < 0)
  256. return err;
  257. tegra_ivc_advance_rx(ivc);
  258. tegra_ivc_flush(ivc, ivc->rx.phys + rx);
  259. /*
  260. * Ensure our write to ivc->rx.position occurs before our read from
  261. * ivc->tx.position.
  262. */
  263. smp_mb();
  264. /*
  265. * Notify only upon transition from full to non-full. The available
  266. * count can only asynchronously increase, so the worst possible
  267. * side-effect will be a spurious notification.
  268. */
  269. tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
  270. if (tegra_ivc_available(ivc, &ivc->rx.map) == ivc->num_frames - 1)
  271. ivc->notify(ivc, ivc->notify_data);
  272. return 0;
  273. }
  274. EXPORT_SYMBOL(tegra_ivc_read_advance);
  275. /* directly poke at the next frame to be tx'ed */
  276. int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
  277. {
  278. int err;
  279. err = tegra_ivc_check_write(ivc);
  280. if (err < 0)
  281. return err;
  282. return tegra_ivc_frame_virt(ivc, &ivc->tx.map, ivc->tx.position, map);
  283. }
  284. EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
  285. /* advance the tx buffer */
  286. int tegra_ivc_write_advance(struct tegra_ivc *ivc)
  287. {
  288. unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
  289. unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
  290. int err;
  291. err = tegra_ivc_check_write(ivc);
  292. if (err < 0)
  293. return err;
  294. tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
  295. ivc->frame_size);
  296. /*
  297. * Order any possible stores to the frame before update of
  298. * ivc->tx.position.
  299. */
  300. smp_wmb();
  301. tegra_ivc_advance_tx(ivc);
  302. tegra_ivc_flush(ivc, ivc->tx.phys + tx);
  303. /*
  304. * Ensure our write to ivc->tx.position occurs before our read from
  305. * ivc->rx.position.
  306. */
  307. smp_mb();
  308. /*
  309. * Notify only upon transition from empty to non-empty. The available
  310. * count can only asynchronously decrease, so the worst possible
  311. * side-effect will be a spurious notification.
  312. */
  313. tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
  314. if (tegra_ivc_available(ivc, &ivc->tx.map) == 1)
  315. ivc->notify(ivc, ivc->notify_data);
  316. return 0;
  317. }
  318. EXPORT_SYMBOL(tegra_ivc_write_advance);
  319. void tegra_ivc_reset(struct tegra_ivc *ivc)
  320. {
  321. unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
  322. tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_SYNC);
  323. tegra_ivc_flush(ivc, ivc->tx.phys + offset);
  324. ivc->notify(ivc, ivc->notify_data);
  325. }
  326. EXPORT_SYMBOL(tegra_ivc_reset);
  327. /*
  328. * =======================================================
  329. * IVC State Transition Table - see tegra_ivc_notified()
  330. * =======================================================
  331. *
  332. * local remote action
  333. * ----- ------ -----------------------------------
  334. * SYNC EST <none>
  335. * SYNC ACK reset counters; move to EST; notify
  336. * SYNC SYNC reset counters; move to ACK; notify
  337. * ACK EST move to EST; notify
  338. * ACK ACK move to EST; notify
  339. * ACK SYNC reset counters; move to ACK; notify
  340. * EST EST <none>
  341. * EST ACK <none>
  342. * EST SYNC reset counters; move to ACK; notify
  343. *
  344. * ===============================================================
  345. */
  346. int tegra_ivc_notified(struct tegra_ivc *ivc)
  347. {
  348. unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
  349. enum tegra_ivc_state rx_state, tx_state;
  350. /* Copy the receiver's state out of shared memory. */
  351. tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
  352. rx_state = tegra_ivc_header_read_field(&ivc->rx.map, tx.state);
  353. tx_state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
  354. if (rx_state == TEGRA_IVC_STATE_SYNC) {
  355. offset = offsetof(struct tegra_ivc_header, tx.count);
  356. /*
  357. * Order observation of TEGRA_IVC_STATE_SYNC before stores
  358. * clearing tx.channel.
  359. */
  360. smp_rmb();
  361. /*
  362. * Reset tx.channel counters. The remote end is in the SYNC
  363. * state and won't make progress until we change our state,
  364. * so the counters are not in use at this time.
  365. */
  366. tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
  367. tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
  368. ivc->tx.position = 0;
  369. ivc->rx.position = 0;
  370. /*
  371. * Ensure that counters appear cleared before new state can be
  372. * observed.
  373. */
  374. smp_wmb();
  375. /*
  376. * Move to ACK state. We have just cleared our counters, so it
  377. * is now safe for the remote end to start using these values.
  378. */
  379. tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ACK);
  380. tegra_ivc_flush(ivc, ivc->tx.phys + offset);
  381. /*
  382. * Notify remote end to observe state transition.
  383. */
  384. ivc->notify(ivc, ivc->notify_data);
  385. } else if (tx_state == TEGRA_IVC_STATE_SYNC &&
  386. rx_state == TEGRA_IVC_STATE_ACK) {
  387. offset = offsetof(struct tegra_ivc_header, tx.count);
  388. /*
  389. * Order observation of ivc_state_sync before stores clearing
  390. * tx_channel.
  391. */
  392. smp_rmb();
  393. /*
  394. * Reset tx.channel counters. The remote end is in the ACK
  395. * state and won't make progress until we change our state,
  396. * so the counters are not in use at this time.
  397. */
  398. tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
  399. tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
  400. ivc->tx.position = 0;
  401. ivc->rx.position = 0;
  402. /*
  403. * Ensure that counters appear cleared before new state can be
  404. * observed.
  405. */
  406. smp_wmb();
  407. /*
  408. * Move to ESTABLISHED state. We know that the remote end has
  409. * already cleared its counters, so it is safe to start
  410. * writing/reading on this channel.
  411. */
  412. tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
  413. tegra_ivc_flush(ivc, ivc->tx.phys + offset);
  414. /*
  415. * Notify remote end to observe state transition.
  416. */
  417. ivc->notify(ivc, ivc->notify_data);
  418. } else if (tx_state == TEGRA_IVC_STATE_ACK) {
  419. offset = offsetof(struct tegra_ivc_header, tx.count);
  420. /*
  421. * At this point, we have observed the peer to be in either
  422. * the ACK or ESTABLISHED state. Next, order observation of
  423. * peer state before storing to tx.channel.
  424. */
  425. smp_rmb();
  426. /*
  427. * Move to ESTABLISHED state. We know that we have previously
  428. * cleared our counters, and we know that the remote end has
  429. * cleared its counters, so it is safe to start writing/reading
  430. * on this channel.
  431. */
  432. tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
  433. tegra_ivc_flush(ivc, ivc->tx.phys + offset);
  434. /*
  435. * Notify remote end to observe state transition.
  436. */
  437. ivc->notify(ivc, ivc->notify_data);
  438. } else {
  439. /*
  440. * There is no need to handle any further action. Either the
  441. * channel is already fully established, or we are waiting for
  442. * the remote end to catch up with our current state. Refer
  443. * to the diagram in "IVC State Transition Table" above.
  444. */
  445. }
  446. if (tx_state != TEGRA_IVC_STATE_ESTABLISHED)
  447. return -EAGAIN;
  448. return 0;
  449. }
  450. EXPORT_SYMBOL(tegra_ivc_notified);
  451. size_t tegra_ivc_align(size_t size)
  452. {
  453. return ALIGN(size, TEGRA_IVC_ALIGN);
  454. }
  455. EXPORT_SYMBOL(tegra_ivc_align);
  456. unsigned tegra_ivc_total_queue_size(unsigned queue_size)
  457. {
  458. if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
  459. pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
  460. __func__, queue_size, TEGRA_IVC_ALIGN);
  461. return 0;
  462. }
  463. return queue_size + sizeof(struct tegra_ivc_header);
  464. }
  465. EXPORT_SYMBOL(tegra_ivc_total_queue_size);
  466. static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
  467. unsigned int num_frames, size_t frame_size)
  468. {
  469. BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
  470. TEGRA_IVC_ALIGN));
  471. BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
  472. TEGRA_IVC_ALIGN));
  473. BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
  474. TEGRA_IVC_ALIGN));
  475. if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
  476. pr_err("num_frames * frame_size overflows\n");
  477. return -EINVAL;
  478. }
  479. if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
  480. pr_err("frame size not adequately aligned: %zu\n", frame_size);
  481. return -EINVAL;
  482. }
  483. /*
  484. * The headers must at least be aligned enough for counters
  485. * to be accessed atomically.
  486. */
  487. if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
  488. pr_err("IVC channel start not aligned: %#lx\n", rx);
  489. return -EINVAL;
  490. }
  491. if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
  492. pr_err("IVC channel start not aligned: %#lx\n", tx);
  493. return -EINVAL;
  494. }
  495. if (rx < tx) {
  496. if (rx + frame_size * num_frames > tx) {
  497. pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
  498. rx, frame_size * num_frames, tx);
  499. return -EINVAL;
  500. }
  501. } else {
  502. if (tx + frame_size * num_frames > rx) {
  503. pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
  504. tx, frame_size * num_frames, rx);
  505. return -EINVAL;
  506. }
  507. }
  508. return 0;
  509. }
  510. static inline void iosys_map_copy(struct iosys_map *dst, const struct iosys_map *src)
  511. {
  512. *dst = *src;
  513. }
  514. static inline unsigned long iosys_map_get_address(const struct iosys_map *map)
  515. {
  516. if (map->is_iomem)
  517. return (unsigned long)map->vaddr_iomem;
  518. return (unsigned long)map->vaddr;
  519. }
  520. static inline void *iosys_map_get_vaddr(const struct iosys_map *map)
  521. {
  522. if (WARN_ON(map->is_iomem))
  523. return NULL;
  524. return map->vaddr;
  525. }
  526. int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx,
  527. dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys,
  528. unsigned int num_frames, size_t frame_size,
  529. void (*notify)(struct tegra_ivc *ivc, void *data),
  530. void *data)
  531. {
  532. size_t queue_size;
  533. int err;
  534. if (WARN_ON(!ivc || !notify))
  535. return -EINVAL;
  536. /*
  537. * All sizes that can be returned by communication functions should
  538. * fit in an int.
  539. */
  540. if (frame_size > INT_MAX)
  541. return -E2BIG;
  542. err = tegra_ivc_check_params(iosys_map_get_address(rx), iosys_map_get_address(tx),
  543. num_frames, frame_size);
  544. if (err < 0)
  545. return err;
  546. queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
  547. if (peer) {
  548. ivc->rx.phys = dma_map_single(peer, iosys_map_get_vaddr(rx), queue_size,
  549. DMA_BIDIRECTIONAL);
  550. if (dma_mapping_error(peer, ivc->rx.phys))
  551. return -ENOMEM;
  552. ivc->tx.phys = dma_map_single(peer, iosys_map_get_vaddr(tx), queue_size,
  553. DMA_BIDIRECTIONAL);
  554. if (dma_mapping_error(peer, ivc->tx.phys)) {
  555. dma_unmap_single(peer, ivc->rx.phys, queue_size,
  556. DMA_BIDIRECTIONAL);
  557. return -ENOMEM;
  558. }
  559. } else {
  560. ivc->rx.phys = rx_phys;
  561. ivc->tx.phys = tx_phys;
  562. }
  563. iosys_map_copy(&ivc->rx.map, rx);
  564. iosys_map_copy(&ivc->tx.map, tx);
  565. ivc->peer = peer;
  566. ivc->notify = notify;
  567. ivc->notify_data = data;
  568. ivc->frame_size = frame_size;
  569. ivc->num_frames = num_frames;
  570. /*
  571. * These values aren't necessarily correct until the channel has been
  572. * reset.
  573. */
  574. ivc->tx.position = 0;
  575. ivc->rx.position = 0;
  576. return 0;
  577. }
  578. EXPORT_SYMBOL(tegra_ivc_init);
  579. void tegra_ivc_cleanup(struct tegra_ivc *ivc)
  580. {
  581. if (ivc->peer) {
  582. size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
  583. ivc->frame_size);
  584. dma_unmap_single(ivc->peer, ivc->rx.phys, size,
  585. DMA_BIDIRECTIONAL);
  586. dma_unmap_single(ivc->peer, ivc->tx.phys, size,
  587. DMA_BIDIRECTIONAL);
  588. }
  589. }
  590. EXPORT_SYMBOL(tegra_ivc_cleanup);