seq_fifo.c 6.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299
  1. /*
  2. * ALSA sequencer FIFO
  3. * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
  4. *
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. */
  21. #include <sound/core.h>
  22. #include <linux/slab.h>
  23. #include <linux/sched/signal.h>
  24. #include "seq_fifo.h"
  25. #include "seq_lock.h"
  26. /* FIFO */
  27. /* create new fifo */
  28. struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
  29. {
  30. struct snd_seq_fifo *f;
  31. f = kzalloc(sizeof(*f), GFP_KERNEL);
  32. if (!f)
  33. return NULL;
  34. f->pool = snd_seq_pool_new(poolsize);
  35. if (f->pool == NULL) {
  36. kfree(f);
  37. return NULL;
  38. }
  39. if (snd_seq_pool_init(f->pool) < 0) {
  40. snd_seq_pool_delete(&f->pool);
  41. kfree(f);
  42. return NULL;
  43. }
  44. spin_lock_init(&f->lock);
  45. snd_use_lock_init(&f->use_lock);
  46. init_waitqueue_head(&f->input_sleep);
  47. atomic_set(&f->overflow, 0);
  48. f->head = NULL;
  49. f->tail = NULL;
  50. f->cells = 0;
  51. return f;
  52. }
/* delete the fifo pointed to by *fifo and clear the caller's pointer.
 * Order matters: close the pool to new allocations, drain queued cells,
 * wake any sleeping readers, then tear down the pool and free the fifo. */
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;	/* prevent further use through the caller's pointer */

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);	/* refuse new cell allocations */

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources...*/
	/*....................*/

	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}
  76. static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);
/* clear queue: reset the overflow flag and free every queued cell.
 * snd_use_lock_sync() first waits until concurrent users of the fifo
 * (e.g. snd_seq_fifo_event_in) have dropped their use-lock references,
 * then the list is drained under the spinlock. */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
	spin_unlock_irqrestore(&f->lock, flags);
}
/* enqueue event to fifo:
 * duplicate @event into a cell from the fifo's pool and append it to the
 * tail of the list, then wake any sleeping reader.
 * Returns 0 on success or a negative error code from snd_seq_event_dup();
 * -ENOMEM/-EAGAIN additionally bump the overflow counter.  The duplication
 * is always non-blocking. */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	/* hold a use-lock reference so the pool cannot be swapped or torn
	 * down (snd_seq_fifo_resize / snd_seq_fifo_clear sync on this) while
	 * we allocate from it */
	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);	/* record dropped events */
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append new cells to fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	cell->next = NULL;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */
}
  125. /* dequeue cell from fifo */
  126. static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
  127. {
  128. struct snd_seq_event_cell *cell;
  129. if ((cell = f->head) != NULL) {
  130. f->head = cell->next;
  131. /* reset tail if this was the last element */
  132. if (f->tail == cell)
  133. f->tail = NULL;
  134. cell->next = NULL;
  135. f->cells--;
  136. }
  137. return cell;
  138. }
/* dequeue one cell from the fifo for the caller (which then copies it to
 * user space).  With @nonblock set, returns -EAGAIN when the fifo is empty;
 * otherwise sleeps interruptibly on input_sleep until a producer queues a
 * cell, returning -ERESTARTSYS if interrupted by a signal.
 * On success, *cellp holds the detached cell and 0 is returned. */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		/* task state must be set before dropping the lock, so a
		 * wakeup racing between unlock and schedule() is not lost */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		/* NOTE(review): plain spin_unlock_irq/spin_lock_irq here,
		 * unlike the irqsave/irqrestore pair bracketing the function
		 * -- presumably this path only runs from process context
		 * with IRQs enabled; confirm before changing */
		spin_unlock_irq(&f->lock);
		schedule();
		spin_lock_irq(&f->lock);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			/* interrupted - let the syscall be restarted */
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}
  172. void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
  173. struct snd_seq_event_cell *cell)
  174. {
  175. unsigned long flags;
  176. if (cell) {
  177. spin_lock_irqsave(&f->lock, flags);
  178. cell->next = f->head;
  179. f->head = cell;
  180. if (!f->tail)
  181. f->tail = cell;
  182. f->cells++;
  183. spin_unlock_irqrestore(&f->lock, flags);
  184. }
  185. }
  186. /* polling; return non-zero if queue is available */
  187. int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
  188. poll_table *wait)
  189. {
  190. poll_wait(file, &f->input_sleep, wait);
  191. return (f->cells > 0);
  192. }
/* change the size of pool; all old events are removed.
 * A fresh pool is built first; then, under the spinlock, the fifo is
 * atomically switched to the new (empty) pool.  Only after all in-flight
 * users have drained (use_lock_sync) are the old cells freed and the old
 * pool deleted, outside the spinlock.
 * Returns 0 on success, -EINVAL on a bad fifo, -ENOMEM on allocation
 * failure (the fifo is left untouched on error). */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	unsigned long flags;
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irqsave(&f->lock, flags);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irqrestore(&f->lock, flags);

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}
  231. /* get the number of unused cells safely */
  232. int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
  233. {
  234. unsigned long flags;
  235. int cells;
  236. if (!f)
  237. return 0;
  238. snd_use_lock_use(&f->use_lock);
  239. spin_lock_irqsave(&f->lock, flags);
  240. cells = snd_seq_unused_cells(f->pool);
  241. spin_unlock_irqrestore(&f->lock, flags);
  242. snd_use_lock_free(&f->use_lock);
  243. return cells;
  244. }