/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
        /*
         * This value is zero for backwards compatibility with services that
         * assume channels to be initially zeroed. Such channels are in an
         * initially valid state, but cannot be asynchronously reset, and must
         * maintain a valid state at all times.
         *
         * The transmitting end can enter the established state from the sync
         * or ack state when it observes the receiving endpoint in the ack or
         * established state, indicating that it has cleared the counters in
         * our rx_channel.
         */
        TEGRA_IVC_STATE_ESTABLISHED = 0,

        /*
         * If an endpoint is observed in the sync state, the remote endpoint is
         * allowed to clear the counters it owns asynchronously with respect to
         * the current endpoint. Therefore, the current endpoint is no longer
         * allowed to communicate.
         */
        TEGRA_IVC_STATE_SYNC,

        /*
         * When the transmitting end observes the receiving end in the sync
         * state, it can clear its transmit and receive counters and transition
         * to the ack state. If the remote endpoint observes us in the ack
         * state, it can return to the established state once it has cleared
         * its counters.
         */
        TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts, the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_header {
        union {
                struct {
                        /* fields owned by the transmitting end */
                        u32 count;
                        u32 state;
                };

                u8 pad[TEGRA_IVC_ALIGN];
        } tx;

        union {
                /* fields owned by the receiving end */
                u32 count;

                u8 pad[TEGRA_IVC_ALIGN];
        } rx;
};

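/*
 * For orientation, a sketch of the shared-memory layout of one queue (one
 * direction of a channel) as computed by tegra_ivc_frame_virt() and
 * tegra_ivc_frame_phys() below: the frames immediately follow the header.
 *
 *      offset 0                                               : struct tegra_ivc_header
 *      offset sizeof(header)                                  : frame 0
 *      offset sizeof(header) + 1 * frame_size                 : frame 1
 *      ...
 *      offset sizeof(header) + (num_frames - 1) * frame_size  : last frame
 *
 * A channel consists of two such queues, one per direction; the memory an
 * endpoint accesses through its tx pointers is expected to be the same memory
 * the peer accesses through its rx pointers.
 */
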
static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
        if (!ivc->peer)
                return;

        dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
                                DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
        if (!ivc->peer)
                return;

        dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
                                   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
                                   struct tegra_ivc_header *header)
{
        /*
         * This function performs multiple checks on the same values with
         * security implications, so create snapshots with READ_ONCE() to
         * ensure that these checks use the same values.
         */
        u32 tx = READ_ONCE(header->tx.count);
        u32 rx = READ_ONCE(header->rx.count);

        /*
         * Perform an over-full check to prevent denial of service attacks
         * where a server could be easily fooled into believing that there's
         * an extremely large number of frames ready, since receivers are not
         * expected to check for full or over-full conditions.
         *
         * Although the channel isn't empty, this is an invalid case caused by
         * a potentially malicious peer, so returning empty is safer, because
         * it gives the impression that the channel has gone silent.
         */
        if (tx - rx > ivc->num_frames)
                return true;

        return tx == rx;
}

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
                                  struct tegra_ivc_header *header)
{
        u32 tx = READ_ONCE(header->tx.count);
        u32 rx = READ_ONCE(header->rx.count);

        /*
         * Invalid cases where the counters indicate that the queue is over
         * capacity also appear full.
         */
        return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
                                      struct tegra_ivc_header *header)
{
        u32 tx = READ_ONCE(header->tx.count);
        u32 rx = READ_ONCE(header->rx.count);

        /*
         * This function isn't expected to be used in scenarios where an
         * over-full situation can lead to denial of service attacks. See the
         * comment in tegra_ivc_empty() for an explanation about special
         * over-full considerations.
         */
        return tx - rx;
}

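/*
 * Worked example of the free-running counters used above (illustrative
 * values only): with num_frames == 4, tx.count == 7 and rx.count == 5, the
 * queue holds 7 - 5 == 2 frames. The subtraction is done on u32 values, so
 * it remains correct across wraparound: tx.count == 1 and
 * rx.count == 0xffffffff still yield 1 - 0xffffffff == 2 pending frames.
 * A difference larger than num_frames can only be produced by a misbehaving
 * peer and is reported as empty by tegra_ivc_empty() and as full by
 * tegra_ivc_full().
 */
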
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
        WRITE_ONCE(ivc->tx.channel->tx.count,
                   READ_ONCE(ivc->tx.channel->tx.count) + 1);

        if (ivc->tx.position == ivc->num_frames - 1)
                ivc->tx.position = 0;
        else
                ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
        WRITE_ONCE(ivc->rx.channel->rx.count,
                   READ_ONCE(ivc->rx.channel->rx.count) + 1);

        if (ivc->rx.position == ivc->num_frames - 1)
                ivc->rx.position = 0;
        else
                ivc->rx.position++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

        /*
         * tx.channel->state is set locally, so it is not synchronized with
         * state from the remote peer. The remote peer cannot reset its
         * transmit counters until we've acknowledged its synchronization
         * request, so no additional synchronization is required because an
         * asynchronous transition of rx.channel->state to
         * TEGRA_IVC_STATE_ACK is not allowed.
         */
        if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
                return -ECONNRESET;

        /*
         * Avoid unnecessary invalidations when performing repeated accesses
         * to an IVC channel by checking the old queue pointers first.
         *
         * Synchronization is only necessary when these pointers indicate
         * empty or full.
         */
        if (!tegra_ivc_empty(ivc, ivc->rx.channel))
                return 0;

        tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

        if (tegra_ivc_empty(ivc, ivc->rx.channel))
                return -ENOSPC;

        return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

        if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
                return -ECONNRESET;

        if (!tegra_ivc_full(ivc, ivc->tx.channel))
                return 0;

        tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

        if (tegra_ivc_full(ivc, ivc->tx.channel))
                return -ENOSPC;

        return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
                                  struct tegra_ivc_header *header,
                                  unsigned int frame)
{
        if (WARN_ON(frame >= ivc->num_frames))
                return ERR_PTR(-EINVAL);

        return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
                                              dma_addr_t phys,
                                              unsigned int frame)
{
        unsigned long offset;

        offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

        return phys + offset;
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
                                              dma_addr_t phys,
                                              unsigned int frame,
                                              unsigned int offset,
                                              size_t size)
{
        if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
                return;

        phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

        dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
                                         dma_addr_t phys,
                                         unsigned int frame,
                                         unsigned int offset,
                                         size_t size)
{
        if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
                return;

        phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

        dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
        int err;

        if (WARN_ON(ivc == NULL))
                return ERR_PTR(-EINVAL);

        err = tegra_ivc_check_read(ivc);
        if (err < 0)
                return ERR_PTR(err);

        /*
         * Order observation of ivc->rx.position potentially indicating new
         * data before data read.
         */
        smp_rmb();

        tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
                                   ivc->frame_size);

        return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
        unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
        unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
        int err;

        /*
         * No read barriers or synchronization here: the caller is expected to
         * have already observed the channel non-empty. This check is just to
         * catch programming errors.
         */
        err = tegra_ivc_check_read(ivc);
        if (err < 0)
                return err;

        tegra_ivc_advance_rx(ivc);

        tegra_ivc_flush(ivc, ivc->rx.phys + rx);

        /*
         * Ensure our write to ivc->rx.position occurs before our read from
         * ivc->tx.position.
         */
        smp_mb();

        /*
         * Notify only upon transition from full to non-full. The available
         * count can only asynchronously increase, so the worst possible
         * side-effect will be a spurious notification.
         */
        tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

        if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
                ivc->notify(ivc, ivc->notify_data);

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
        int err;

        err = tegra_ivc_check_write(ivc);
        if (err < 0)
                return ERR_PTR(err);

        return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
        unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
        unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
        int err;

        err = tegra_ivc_check_write(ivc);
        if (err < 0)
                return err;

        tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
                              ivc->frame_size);

        /*
         * Order any possible stores to the frame before update of
         * ivc->tx.position.
         */
        smp_wmb();

        tegra_ivc_advance_tx(ivc);

        tegra_ivc_flush(ivc, ivc->tx.phys + tx);

        /*
         * Ensure our write to ivc->tx.position occurs before our read from
         * ivc->rx.position.
         */
        smp_mb();

        /*
         * Notify only upon transition from empty to non-empty. The available
         * count can only asynchronously decrease, so the worst possible
         * side-effect will be a spurious notification.
         */
        tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

        if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
                ivc->notify(ivc, ivc->notify_data);

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);

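/*
 * A minimal usage sketch of the frame API above, from a hypothetical client
 * driver; "data", "len" and my_process_frame() are placeholders, not part of
 * this API:
 *
 *      // transmit one frame
 *      void *frame = tegra_ivc_write_get_next_frame(ivc);
 *      if (!IS_ERR(frame)) {
 *              memcpy(frame, data, len);       // len <= ivc->frame_size
 *              tegra_ivc_write_advance(ivc);   // publish, possibly notify
 *      }
 *
 *      // receive one frame
 *      frame = tegra_ivc_read_get_next_frame(ivc);
 *      if (!IS_ERR(frame)) {
 *              my_process_frame(frame);
 *              tegra_ivc_read_advance(ivc);    // free the slot, possibly notify
 *      }
 */
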
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

        ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
        tegra_ivc_flush(ivc, ivc->tx.phys + offset);
        ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *      local   remote  action
 *      -----   ------  -----------------------------------
 *      SYNC    EST     <none>
 *      SYNC    ACK     reset counters; move to EST; notify
 *      SYNC    SYNC    reset counters; move to ACK; notify
 *      ACK     EST     move to EST; notify
 *      ACK     ACK     move to EST; notify
 *      ACK     SYNC    reset counters; move to ACK; notify
 *      EST     EST     <none>
 *      EST     ACK     <none>
 *      EST     SYNC    reset counters; move to ACK; notify
 *
 * ===============================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
        enum tegra_ivc_state state;

        /* Copy the receiver's state out of shared memory. */
        tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
        state = READ_ONCE(ivc->rx.channel->tx.state);

        if (state == TEGRA_IVC_STATE_SYNC) {
                offset = offsetof(struct tegra_ivc_header, tx.count);

                /*
                 * Order observation of TEGRA_IVC_STATE_SYNC before stores
                 * clearing tx.channel.
                 */
                smp_rmb();

                /*
                 * Reset tx.channel counters. The remote end is in the SYNC
                 * state and won't make progress until we change our state,
                 * so the counters are not in use at this time.
                 */
                ivc->tx.channel->tx.count = 0;
                ivc->rx.channel->rx.count = 0;

                ivc->tx.position = 0;
                ivc->rx.position = 0;

                /*
                 * Ensure that counters appear cleared before new state can be
                 * observed.
                 */
                smp_wmb();

                /*
                 * Move to ACK state. We have just cleared our counters, so it
                 * is now safe for the remote end to start using these values.
                 */
                ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
                tegra_ivc_flush(ivc, ivc->tx.phys + offset);

                /*
                 * Notify remote end to observe state transition.
                 */
                ivc->notify(ivc, ivc->notify_data);
        } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
                   state == TEGRA_IVC_STATE_ACK) {
                offset = offsetof(struct tegra_ivc_header, tx.count);

                /*
                 * Order observation of TEGRA_IVC_STATE_ACK before stores
                 * clearing tx.channel.
                 */
                smp_rmb();

                /*
                 * Reset tx.channel counters. The remote end is in the ACK
                 * state and won't make progress until we change our state,
                 * so the counters are not in use at this time.
                 */
                ivc->tx.channel->tx.count = 0;
                ivc->rx.channel->rx.count = 0;

                ivc->tx.position = 0;
                ivc->rx.position = 0;

                /*
                 * Ensure that counters appear cleared before new state can be
                 * observed.
                 */
                smp_wmb();

                /*
                 * Move to ESTABLISHED state. We know that the remote end has
                 * already cleared its counters, so it is safe to start
                 * writing/reading on this channel.
                 */
                ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
                tegra_ivc_flush(ivc, ivc->tx.phys + offset);

                /*
                 * Notify remote end to observe state transition.
                 */
                ivc->notify(ivc, ivc->notify_data);
        } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
                offset = offsetof(struct tegra_ivc_header, tx.count);

                /*
                 * At this point, we have observed the peer to be in either
                 * the ACK or ESTABLISHED state. Next, order observation of
                 * peer state before storing to tx.channel.
                 */
                smp_rmb();

                /*
                 * Move to ESTABLISHED state. We know that we have previously
                 * cleared our counters, and we know that the remote end has
                 * cleared its counters, so it is safe to start writing/reading
                 * on this channel.
                 */
                ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
                tegra_ivc_flush(ivc, ivc->tx.phys + offset);

                /*
                 * Notify remote end to observe state transition.
                 */
                ivc->notify(ivc, ivc->notify_data);
        } else {
                /*
                 * There is no need to handle any further action. Either the
                 * channel is already fully established, or we are waiting for
                 * the remote end to catch up with our current state. Refer
                 * to the diagram in "IVC State Transition Table" above.
                 */
        }

        if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
                return -EAGAIN;

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);

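/*
 * A sketch of the reset handshake from a client's perspective, assuming the
 * notify() callback rings a doorbell that eventually triggers a notification
 * handler like the hypothetical one below on each end:
 *
 *      tegra_ivc_reset(ivc);                   // request a reset (SYNC)
 *
 *      static void my_ivc_notification_handler(struct tegra_ivc *ivc)
 *      {
 *              if (tegra_ivc_notified(ivc) != 0)
 *                      return;                 // handshake still in progress
 *
 *              // channel is ESTABLISHED: frames may now be written and read
 *      }
 *
 * Both ends call tegra_ivc_notified() on every notification; it walks the
 * state table above and returns 0 only once the local end has reached
 * TEGRA_IVC_STATE_ESTABLISHED.
 */
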
size_t tegra_ivc_align(size_t size)
{
        return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
        if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
                pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
                       __func__, queue_size, TEGRA_IVC_ALIGN);
                return 0;
        }

        return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
                                  unsigned int num_frames, size_t frame_size)
{
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
                                 TEGRA_IVC_ALIGN));
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
                                 TEGRA_IVC_ALIGN));
        BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
                                 TEGRA_IVC_ALIGN));

        if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
                pr_err("num_frames * frame_size overflows\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
                pr_err("frame size not adequately aligned: %zu\n", frame_size);
                return -EINVAL;
        }

        /*
         * The headers must at least be aligned enough for counters
         * to be accessed atomically.
         */
        if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
                pr_err("IVC channel start not aligned: %#lx\n", rx);
                return -EINVAL;
        }

        if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
                pr_err("IVC channel start not aligned: %#lx\n", tx);
                return -EINVAL;
        }

        if (rx < tx) {
                if (rx + frame_size * num_frames > tx) {
                        pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
                               rx, frame_size * num_frames, tx);
                        return -EINVAL;
                }
        } else {
                if (tx + frame_size * num_frames > rx) {
                        pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
                               tx, frame_size * num_frames, rx);
                        return -EINVAL;
                }
        }

        return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
                   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
                   unsigned int num_frames, size_t frame_size,
                   void (*notify)(struct tegra_ivc *ivc, void *data),
                   void *data)
{
        size_t queue_size;
        int err;

        if (WARN_ON(!ivc || !notify))
                return -EINVAL;

        /*
         * All sizes that can be returned by communication functions should
         * fit in an int.
         */
        if (frame_size > INT_MAX)
                return -E2BIG;

        err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
                                     num_frames, frame_size);
        if (err < 0)
                return err;

        queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

        if (peer) {
                ivc->rx.phys = dma_map_single(peer, rx, queue_size,
                                              DMA_BIDIRECTIONAL);
                if (dma_mapping_error(peer, ivc->rx.phys))
                        return -ENOMEM;

                ivc->tx.phys = dma_map_single(peer, tx, queue_size,
                                              DMA_BIDIRECTIONAL);
                if (dma_mapping_error(peer, ivc->tx.phys)) {
                        dma_unmap_single(peer, ivc->rx.phys, queue_size,
                                         DMA_BIDIRECTIONAL);
                        return -ENOMEM;
                }
        } else {
                ivc->rx.phys = rx_phys;
                ivc->tx.phys = tx_phys;
        }

        ivc->rx.channel = rx;
        ivc->tx.channel = tx;
        ivc->peer = peer;
        ivc->notify = notify;
        ivc->notify_data = data;
        ivc->frame_size = frame_size;
        ivc->num_frames = num_frames;

        /*
         * These values aren't necessarily correct until the channel has been
         * reset.
         */
        ivc->tx.position = 0;
        ivc->rx.position = 0;

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);

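/*
 * A sketch of channel setup, assuming the caller has carved two regions of
 * at least queue_size bytes (rx_base/tx_base with DMA addresses
 * rx_phys/tx_phys) out of memory shared with the peer; all names other than
 * the functions defined in this file are placeholders:
 *
 *      size_t frame_size = tegra_ivc_align(MY_MESSAGE_SIZE);
 *      size_t queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
 *
 *      err = tegra_ivc_init(&ivc, peer_dev, rx_base, rx_phys, tx_base,
 *                           tx_phys, num_frames, frame_size, my_notify,
 *                           my_data);
 *      if (err < 0)
 *              return err;
 *
 *      tegra_ivc_reset(&ivc);  // start the reset handshake described above
 */
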
void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
        if (ivc->peer) {
                size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
                                                         ivc->frame_size);

                dma_unmap_single(ivc->peer, ivc->rx.phys, size,
                                 DMA_BIDIRECTIONAL);
                dma_unmap_single(ivc->peer, ivc->tx.phys, size,
                                 DMA_BIDIRECTIONAL);
        }
}
EXPORT_SYMBOL(tegra_ivc_cleanup);