seq_clientmgr.c

/*
 *  ALSA sequencer Client Manager
 *  Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                             Jaroslav Kysela <perex@perex.cz>
 *                             Takashi Iwai <tiwai@suse.de>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <linux/kmod.h>

#include <sound/seq_kernel.h>
#include "seq_clientmgr.h"
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_timer.h"
#include "seq_info.h"
#include "seq_system.h"
#include <sound/seq_device.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/* Client Manager
 * this module handles the connections of userland and kernel clients
 *
 */

/*
 * There are four ranges of client numbers (last two shared):
 * 0..15: global clients
 * 16..127: statically allocated client numbers for cards 0..27
 * 128..191: dynamically allocated client numbers for cards 28..31
 * 128..191: dynamically allocated client numbers for applications
 */

/* number of kernel non-card clients */
#define SNDRV_SEQ_GLOBAL_CLIENTS	16
/* clients per cards, for static clients */
#define SNDRV_SEQ_CLIENTS_PER_CARD	4
/* dynamically allocated client numbers (both kernel drivers and user space) */
#define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128
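
/*
 * Editorial illustration (not part of the original source): with the values
 * above, the static client numbers reserved for sound card N (N < 28) start
 * at
 *
 *      base = SNDRV_SEQ_GLOBAL_CLIENTS + N * SNDRV_SEQ_CLIENTS_PER_CARD;
 *
 * e.g. card 0 owns clients 16..19 and card 1 owns clients 20..23; this is the
 * inverse of the card lookup performed in snd_seq_client_use_ptr() below.
 */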

#define SNDRV_SEQ_LFLG_INPUT	0x0001
#define SNDRV_SEQ_LFLG_OUTPUT	0x0002
#define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)

static DEFINE_SPINLOCK(clients_lock);
static DEFINE_MUTEX(register_mutex);

/*
 * client table
 */
static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_usage client_usage;

/*
 * prototypes
 */
static int bounce_error_event(struct snd_seq_client *client,
                              struct snd_seq_event *event,
                              int err, int atomic, int hop);
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
                                        struct snd_seq_event *event,
                                        int filter, int atomic, int hop);

/*
 */
static inline unsigned short snd_seq_file_flags(struct file *file)
{
        switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
        case FMODE_WRITE:
                return SNDRV_SEQ_LFLG_OUTPUT;
        case FMODE_READ:
                return SNDRV_SEQ_LFLG_INPUT;
        default:
                return SNDRV_SEQ_LFLG_OPEN;
        }
}

static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
{
        return snd_seq_total_cells(client->pool) > 0;
}

/* return pointer to client structure for specified id */
static struct snd_seq_client *clientptr(int clientid)
{
        if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
                pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
                         clientid);
                return NULL;
        }
        return clienttab[clientid];
}

struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
{
        unsigned long flags;
        struct snd_seq_client *client;

        if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
                pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
                         clientid);
                return NULL;
        }
        spin_lock_irqsave(&clients_lock, flags);
        client = clientptr(clientid);
        if (client)
                goto __lock;
        if (clienttablock[clientid]) {
                spin_unlock_irqrestore(&clients_lock, flags);
                return NULL;
        }
        spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
        if (!in_interrupt()) {
                static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
                static char card_requested[SNDRV_CARDS];
                if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
                        int idx;

                        if (!client_requested[clientid]) {
                                client_requested[clientid] = 1;
                                for (idx = 0; idx < 15; idx++) {
                                        if (seq_client_load[idx] < 0)
                                                break;
                                        if (seq_client_load[idx] == clientid) {
                                                request_module("snd-seq-client-%i",
                                                               clientid);
                                                break;
                                        }
                                }
                        }
                } else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
                        int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
                                SNDRV_SEQ_CLIENTS_PER_CARD;
                        if (card < snd_ecards_limit) {
                                if (! card_requested[card]) {
                                        card_requested[card] = 1;
                                        snd_request_card(card);
                                }
                                snd_seq_device_load_drivers();
                        }
                }
                spin_lock_irqsave(&clients_lock, flags);
                client = clientptr(clientid);
                if (client)
                        goto __lock;
                spin_unlock_irqrestore(&clients_lock, flags);
        }
#endif
        return NULL;

      __lock:
        snd_use_lock_use(&client->use_lock);
        spin_unlock_irqrestore(&clients_lock, flags);
        return client;
}
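
/*
 * Editorial illustration (not part of the original source): every pointer
 * obtained via snd_seq_client_use_ptr() holds a use-lock reference and must
 * be released with snd_seq_client_unlock(), e.g.
 *
 *      struct snd_seq_client *c = snd_seq_client_use_ptr(id);
 *      if (c) {
 *              ... use c ...
 *              snd_seq_client_unlock(c);
 *      }
 *
 * seq_free_client1() below waits in snd_use_lock_sync() until all such
 * references have been dropped.
 */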

static void usage_alloc(struct snd_seq_usage *res, int num)
{
        res->cur += num;
        if (res->cur > res->peak)
                res->peak = res->cur;
}

static void usage_free(struct snd_seq_usage *res, int num)
{
        res->cur -= num;
}

/* initialise data structures */
int __init client_init_data(void)
{
        /* zap out the client table */
        memset(&clienttablock, 0, sizeof(clienttablock));
        memset(&clienttab, 0, sizeof(clienttab));
        return 0;
}

static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
{
        unsigned long flags;
        int c;
        struct snd_seq_client *client;

        /* init client data */
        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL)
                return NULL;
        client->pool = snd_seq_pool_new(poolsize);
        if (client->pool == NULL) {
                kfree(client);
                return NULL;
        }
        client->type = NO_CLIENT;
        snd_use_lock_init(&client->use_lock);
        rwlock_init(&client->ports_lock);
        mutex_init(&client->ports_mutex);
        INIT_LIST_HEAD(&client->ports_list_head);
        mutex_init(&client->ioctl_mutex);

        /* find free slot in the client table */
        spin_lock_irqsave(&clients_lock, flags);
        if (client_index < 0) {
                for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
                     c < SNDRV_SEQ_MAX_CLIENTS;
                     c++) {
                        if (clienttab[c] || clienttablock[c])
                                continue;
                        clienttab[client->number = c] = client;
                        spin_unlock_irqrestore(&clients_lock, flags);
                        return client;
                }
        } else {
                if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
                        clienttab[client->number = client_index] = client;
                        spin_unlock_irqrestore(&clients_lock, flags);
                        return client;
                }
        }
        spin_unlock_irqrestore(&clients_lock, flags);
        snd_seq_pool_delete(&client->pool);
        kfree(client);
        return NULL;    /* no free slot found or busy, return failure code */
}

static int seq_free_client1(struct snd_seq_client *client)
{
        unsigned long flags;

        if (!client)
                return 0;
        spin_lock_irqsave(&clients_lock, flags);
        clienttablock[client->number] = 1;
        clienttab[client->number] = NULL;
        spin_unlock_irqrestore(&clients_lock, flags);
        snd_seq_delete_all_ports(client);
        snd_seq_queue_client_leave(client->number);
        snd_use_lock_sync(&client->use_lock);
        snd_seq_queue_client_termination(client->number);
        if (client->pool)
                snd_seq_pool_delete(&client->pool);
        spin_lock_irqsave(&clients_lock, flags);
        clienttablock[client->number] = 0;
        spin_unlock_irqrestore(&clients_lock, flags);
        return 0;
}

static void seq_free_client(struct snd_seq_client * client)
{
        mutex_lock(&register_mutex);
        switch (client->type) {
        case NO_CLIENT:
                pr_warn("ALSA: seq: Trying to free unused client %d\n",
                        client->number);
                break;
        case USER_CLIENT:
        case KERNEL_CLIENT:
                seq_free_client1(client);
                usage_free(&client_usage, 1);
                break;
        default:
                pr_err("ALSA: seq: Trying to free client %d with undefined type = %d\n",
                       client->number, client->type);
        }
        mutex_unlock(&register_mutex);

        snd_seq_system_client_ev_client_exit(client->number);
}

/* -------------------------------------------------------- */

/* create a user client */
static int snd_seq_open(struct inode *inode, struct file *file)
{
        int c, mode;                    /* client id */
        struct snd_seq_client *client;
        struct snd_seq_user_client *user;
        int err;

        err = nonseekable_open(inode, file);
        if (err < 0)
                return err;

        mutex_lock(&register_mutex);
        client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
        if (!client) {
                mutex_unlock(&register_mutex);
                return -ENOMEM; /* failure code */
        }

        mode = snd_seq_file_flags(file);
        if (mode & SNDRV_SEQ_LFLG_INPUT)
                client->accept_input = 1;
        if (mode & SNDRV_SEQ_LFLG_OUTPUT)
                client->accept_output = 1;

        user = &client->data.user;
        user->fifo = NULL;
        user->fifo_pool_size = 0;

        if (mode & SNDRV_SEQ_LFLG_INPUT) {
                user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
                user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
                if (user->fifo == NULL) {
                        seq_free_client1(client);
                        kfree(client);
                        mutex_unlock(&register_mutex);
                        return -ENOMEM;
                }
        }

        usage_alloc(&client_usage, 1);
        client->type = USER_CLIENT;
        mutex_unlock(&register_mutex);

        c = client->number;
        file->private_data = client;

        /* fill client data */
        user->file = file;
        sprintf(client->name, "Client-%d", c);
        client->data.user.owner = get_pid(task_pid(current));

        /* make others aware of this new client */
        snd_seq_system_client_ev_client_start(c);

        return 0;
}

/* delete a user client */
static int snd_seq_release(struct inode *inode, struct file *file)
{
        struct snd_seq_client *client = file->private_data;

        if (client) {
                seq_free_client(client);
                if (client->data.user.fifo)
                        snd_seq_fifo_delete(&client->data.user.fifo);
                put_pid(client->data.user.owner);
                kfree(client);
        }

        return 0;
}

/* handle client read() */
/* possible error values:
 *      -ENXIO  invalid client or file open mode
 *      -ENOSPC FIFO overflow (the flag is cleared after this error report)
 *      -EINVAL not enough user-space buffer to write the whole event
 *      -EFAULT seg. fault during copy to user space
 */
static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
                            loff_t *offset)
{
        struct snd_seq_client *client = file->private_data;
        struct snd_seq_fifo *fifo;
        int err;
        long result = 0;
        struct snd_seq_event_cell *cell;

        if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
                return -ENXIO;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        /* check client structures are in place */
        if (snd_BUG_ON(!client))
                return -ENXIO;

        if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
                return -ENXIO;

        if (atomic_read(&fifo->overflow) > 0) {
                /* buffer overflow is detected */
                snd_seq_fifo_clear(fifo);
                /* return error code */
                return -ENOSPC;
        }

        cell = NULL;
        err = 0;
        snd_seq_fifo_lock(fifo);

        /* while data available in queue */
        while (count >= sizeof(struct snd_seq_event)) {
                int nonblock;

                nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
                if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
                        break;
                }
                if (snd_seq_ev_is_variable(&cell->event)) {
                        struct snd_seq_event tmpev;
                        tmpev = cell->event;
                        tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
                        if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sizeof(struct snd_seq_event);
                        buf += sizeof(struct snd_seq_event);
                        err = snd_seq_expand_var_event(&cell->event, count,
                                                       (char __force *)buf, 0,
                                                       sizeof(struct snd_seq_event));
                        if (err < 0)
                                break;
                        result += err;
                        count -= err;
                        buf += err;
                } else {
                        if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sizeof(struct snd_seq_event);
                        buf += sizeof(struct snd_seq_event);
                }
                snd_seq_cell_free(cell);
                cell = NULL; /* to be sure */
                result += sizeof(struct snd_seq_event);
        }

        if (err < 0) {
                if (cell)
                        snd_seq_fifo_cell_putback(fifo, cell);
                if (err == -EAGAIN && result > 0)
                        err = 0;
        }
        snd_seq_fifo_unlock(fifo);

        return (err < 0) ? err : result;
}
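
/*
 * Editorial illustration (not part of the original source): each read()
 * returns one or more whole struct snd_seq_event records.  For a variable-
 * length event the fixed-size header is followed immediately by the expanded
 * external data, so a hypothetical user-space reader might look like:
 *
 *      char buf[4096];
 *      ssize_t n = read(seq_fd, buf, sizeof(buf));
 *      struct snd_seq_event *ev = (struct snd_seq_event *)buf;
 *      if (snd_seq_ev_is_variable(ev))
 *              ;  // ev->data.ext.len bytes of payload follow the header
 *
 * Only a sketch of the layout produced by snd_seq_read() above; real
 * applications normally use alsa-lib instead of raw read().
 */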

/*
 * check access permission to the port
 */
static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
{
        if ((port->capability & flags) != flags)
                return 0;
        return flags;
}

/*
 * check if the destination client is available, and return the pointer
 * if filter is non-zero, client filter bitmap is tested.
 */
static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
                                                    int filter)
{
        struct snd_seq_client *dest;

        dest = snd_seq_client_use_ptr(event->dest.client);
        if (dest == NULL)
                return NULL;
        if (! dest->accept_input)
                goto __not_avail;
        if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
            ! test_bit(event->type, dest->event_filter))
                goto __not_avail;
        if (filter && !(dest->filter & filter))
                goto __not_avail;

        return dest; /* ok - accessible */
__not_avail:
        snd_seq_client_unlock(dest);
        return NULL;
}

/*
 * Return the error event.
 *
 * If the receiver client is a user client, the original event is
 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
 * the original event is also variable length, the external data is
 * copied after the event record.
 * If the receiver client is a kernel client, the original event is
 * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
 * kmalloc.
 */
static int bounce_error_event(struct snd_seq_client *client,
                              struct snd_seq_event *event,
                              int err, int atomic, int hop)
{
        struct snd_seq_event bounce_ev;
        int result;

        if (client == NULL ||
            ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
            ! client->accept_input)
                return 0; /* ignored */

        /* set up quoted error */
        memset(&bounce_ev, 0, sizeof(bounce_ev));
        bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
        bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
        bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
        bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
        bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
        bounce_ev.dest.client = client->number;
        bounce_ev.dest.port = event->source.port;
        bounce_ev.data.quote.origin = event->dest;
        bounce_ev.data.quote.event = event;
        bounce_ev.data.quote.value = -err; /* use positive value */
        result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
        if (result < 0) {
                client->event_lost++;
                return result;
        }

        return result;
}

/*
 * rewrite the time-stamp of the event record with the current time
 * of the given queue.
 * return non-zero if updated.
 */
static int update_timestamp_of_queue(struct snd_seq_event *event,
                                     int queue, int real_time)
{
        struct snd_seq_queue *q;

        q = queueptr(queue);
        if (! q)
                return 0;
        event->queue = queue;
        event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
        if (real_time) {
                event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
                event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
        } else {
                event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
                event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
        }
        queuefree(q);
        return 1;
}
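
/*
 * Editorial note (not part of the original source): the flag bits select
 * which member of the timestamp union is valid, e.g.
 *
 *      if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_REAL)
 *              ;  // ev->time.time is a real-time (sec/nsec) stamp
 *      else
 *              ;  // ev->time.tick is a tick count
 *
 * which is why the function above clears the mask before setting one of the
 * two variants.
 */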

/*
 * deliver an event to the specified destination.
 * if filter is non-zero, client filter bitmap is tested.
 *
 *  RETURN VALUE: 0 : if succeeded
 *               <0 : error
 */
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
                                        struct snd_seq_event *event,
                                        int filter, int atomic, int hop)
{
        struct snd_seq_client *dest = NULL;
        struct snd_seq_client_port *dest_port = NULL;
        int result = -ENOENT;
        int direct;

        direct = snd_seq_ev_is_direct(event);

        dest = get_event_dest_client(event, filter);
        if (dest == NULL)
                goto __skip;
        dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
        if (dest_port == NULL)
                goto __skip;

        /* check permission */
        if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
                result = -EPERM;
                goto __skip;
        }

        if (dest_port->timestamping)
                update_timestamp_of_queue(event, dest_port->time_queue,
                                          dest_port->time_real);

        switch (dest->type) {
        case USER_CLIENT:
                if (dest->data.user.fifo)
                        result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
                break;

        case KERNEL_CLIENT:
                if (dest_port->event_input == NULL)
                        break;
                result = dest_port->event_input(event, direct,
                                                dest_port->private_data,
                                                atomic, hop);
                break;
        default:
                break;
        }

  __skip:
        if (dest_port)
                snd_seq_port_unlock(dest_port);
        if (dest)
                snd_seq_client_unlock(dest);

        if (result < 0 && !direct) {
                result = bounce_error_event(client, event, result, atomic, hop);
        }
        return result;
}

/*
 * send the event to all subscribers:
 */
static int deliver_to_subscribers(struct snd_seq_client *client,
                                  struct snd_seq_event *event,
                                  int atomic, int hop)
{
        struct snd_seq_subscribers *subs;
        int err, result = 0, num_ev = 0;
        struct snd_seq_event event_saved;
        struct snd_seq_client_port *src_port;
        struct snd_seq_port_subs_info *grp;

        src_port = snd_seq_port_use_ptr(client, event->source.port);
        if (src_port == NULL)
                return -EINVAL; /* invalid source port */
        /* save original event record */
        event_saved = *event;
        grp = &src_port->c_src;

        /* lock list */
        if (atomic)
                read_lock(&grp->list_lock);
        else
                down_read_nested(&grp->list_mutex, hop);
        list_for_each_entry(subs, &grp->list_head, src_list) {
                /* both ports ready? */
                if (atomic_read(&subs->ref_count) != 2)
                        continue;
                event->dest = subs->info.dest;
                if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
                        /* convert time according to flag with subscription */
                        update_timestamp_of_queue(event, subs->info.queue,
                                                  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
                err = snd_seq_deliver_single_event(client, event,
                                                   0, atomic, hop);
                if (err < 0) {
                        /* save first error that occurs and continue */
                        if (!result)
                                result = err;
                        continue;
                }
                num_ev++;
                /* restore original event record */
                *event = event_saved;
        }
        if (atomic)
                read_unlock(&grp->list_lock);
        else
                up_read(&grp->list_mutex);
        *event = event_saved; /* restore */
        snd_seq_port_unlock(src_port);
        return (result < 0) ? result : num_ev;
}
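
/*
 * Editorial note (not part of the original source): the hop count doubles as
 * the lockdep subclass in down_read_nested() above.  When a delivered event
 * is forwarded by a kernel client and re-enters deliver_to_subscribers() with
 * hop + 1, the nested grp->list_mutex acquisition uses a different subclass,
 * so lockdep does not report a false self-deadlock; the recursion itself is
 * bounded by the SNDRV_SEQ_MAX_HOPS check in snd_seq_deliver_event() below.
 */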

#ifdef SUPPORT_BROADCAST
/*
 * broadcast to all ports:
 */
static int port_broadcast_event(struct snd_seq_client *client,
                                struct snd_seq_event *event,
                                int atomic, int hop)
{
        int num_ev = 0, err, result = 0;
        struct snd_seq_client *dest_client;
        struct snd_seq_client_port *port;

        dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
        if (dest_client == NULL)
                return 0; /* no matching destination */

        read_lock(&dest_client->ports_lock);
        list_for_each_entry(port, &dest_client->ports_list_head, list) {
                event->dest.port = port->addr.port;
                /* pass NULL as source client to avoid error bounce */
                err = snd_seq_deliver_single_event(NULL, event,
                                                   SNDRV_SEQ_FILTER_BROADCAST,
                                                   atomic, hop);
                if (err < 0) {
                        /* save first error that occurs and continue */
                        if (!result)
                                result = err;
                        continue;
                }
                num_ev++;
        }
        read_unlock(&dest_client->ports_lock);
        snd_seq_client_unlock(dest_client);
        event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
        return (result < 0) ? result : num_ev;
}

/*
 * send the event to all clients:
 * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
 */
static int broadcast_event(struct snd_seq_client *client,
                           struct snd_seq_event *event, int atomic, int hop)
{
        int err, result = 0, num_ev = 0;
        int dest;
        struct snd_seq_addr addr;

        addr = event->dest; /* save */

        for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
                /* don't send to itself */
                if (dest == client->number)
                        continue;
                event->dest.client = dest;
                event->dest.port = addr.port;
                if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
                        err = port_broadcast_event(client, event, atomic, hop);
                else
                        /* pass NULL as source client to avoid error bounce */
                        err = snd_seq_deliver_single_event(NULL, event,
                                                           SNDRV_SEQ_FILTER_BROADCAST,
                                                           atomic, hop);
                if (err < 0) {
                        /* save first error that occurs and continue */
                        if (!result)
                                result = err;
                        continue;
                }
                num_ev += err;
        }
        event->dest = addr; /* restore */
        return (result < 0) ? result : num_ev;
}

/* multicast - not supported yet */
static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event,
                           int atomic, int hop)
{
        pr_debug("ALSA: seq: multicast not supported yet.\n");
        return 0; /* ignored */
}
#endif /* SUPPORT_BROADCAST */

/* deliver an event to the destination port(s).
 * if the event is to subscribers or broadcast, the event is dispatched
 * to multiple targets.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
                                 int atomic, int hop)
{
        int result;

        hop++;
        if (hop >= SNDRV_SEQ_MAX_HOPS) {
                pr_debug("ALSA: seq: too long delivery path (%d:%d->%d:%d)\n",
                         event->source.client, event->source.port,
                         event->dest.client, event->dest.port);
                return -EMLINK;
        }

        if (snd_seq_ev_is_variable(event) &&
            snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR)))
                return -EINVAL;

        if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
            event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
                result = deliver_to_subscribers(client, event, atomic, hop);
#ifdef SUPPORT_BROADCAST
        else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
                 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
                result = broadcast_event(client, event, atomic, hop);
        else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
                result = multicast_event(client, event, atomic, hop);
        else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
                result = port_broadcast_event(client, event, atomic, hop);
#endif
        else
                result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);

        return result;
}

/*
 * dispatch an event cell:
 * This function is called only from queue check routines in timer
 * interrupts or after enqueued.
 * The event cell shall be released or re-queued in this function.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
        struct snd_seq_client *client;
        int result;

        if (snd_BUG_ON(!cell))
                return -EINVAL;

        client = snd_seq_client_use_ptr(cell->event.source.client);
        if (client == NULL) {
                snd_seq_cell_free(cell); /* release this cell */
                return -EINVAL;
        }

        if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
                /* NOTE event:
                 * the event cell is re-used as a NOTE-OFF event and
                 * enqueued again.
                 */
                struct snd_seq_event tmpev, *ev;

                /* reserve this event to enqueue note-off later */
                tmpev = cell->event;
                tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
                result = snd_seq_deliver_event(client, &tmpev, atomic, hop);

                /*
                 * This was originally a note event.  We now re-use the
                 * cell for the note-off event.
                 */
                ev = &cell->event;
                ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
                ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;

                /* add the duration time */
                switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
                case SNDRV_SEQ_TIME_STAMP_TICK:
                        ev->time.tick += ev->data.note.duration;
                        break;
                case SNDRV_SEQ_TIME_STAMP_REAL:
                        /* unit for duration is ms */
                        ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
                        ev->time.time.tv_sec += ev->data.note.duration / 1000 +
                                                ev->time.time.tv_nsec / 1000000000;
                        ev->time.time.tv_nsec %= 1000000000;
                        break;
                }
                ev->data.note.velocity = ev->data.note.off_velocity;

                /* Now queue this cell as the note off event */
                if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
                        snd_seq_cell_free(cell); /* release this cell */

        } else {
                /* Normal events:
                 * event cell is freed after processing the event
                 */
                result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
                snd_seq_cell_free(cell);
        }

        snd_seq_client_unlock(client);
        return result;
}
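
/*
 * Editorial illustration (not part of the original source): the real-time
 * branch above adds the millisecond duration to a (tv_sec, tv_nsec) stamp
 * while keeping tv_nsec normalised.  For example, a NOTE at 1.900000000 s
 * with duration = 1500 ms becomes a NOTEOFF at:
 *
 *      tv_nsec = 900000000 + 1000000 * (1500 % 1000)      = 1400000000
 *      tv_sec  = 1 + 1500 / 1000 + 1400000000 / 1000000000 = 3
 *      tv_nsec %= 1000000000                               -> 400000000
 *
 * i.e. 3.400000000 s, exactly 1.5 s after the original timestamp.
 */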

/* Allocate a cell from client pool and enqueue it to queue:
 * if pool is empty and blocking is TRUE, sleep until a new cell is
 * available.
 */
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
                                        struct snd_seq_event *event,
                                        struct file *file, int blocking,
                                        int atomic, int hop,
                                        struct mutex *mutexp)
{
        struct snd_seq_event_cell *cell;
        int err;

        /* special queue values - force direct passing */
        if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
                event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
                event->queue = SNDRV_SEQ_QUEUE_DIRECT;
        } else
#ifdef SUPPORT_BROADCAST
                if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
                        event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
                        event->queue = SNDRV_SEQ_QUEUE_DIRECT;
                }
#endif
        if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
                /* check presence of source port */
                struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
                if (src_port == NULL)
                        return -EINVAL;
                snd_seq_port_unlock(src_port);
        }

        /* direct event processing without enqueuing */
        if (snd_seq_ev_is_direct(event)) {
                if (event->type == SNDRV_SEQ_EVENT_NOTE)
                        return -EINVAL; /* this event must be enqueued! */
                return snd_seq_deliver_event(client, event, atomic, hop);
        }

        /* Not direct, normal queuing */
        if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
                return -EINVAL;  /* invalid queue */
        if (! snd_seq_write_pool_allocated(client))
                return -ENXIO; /* queue is not allocated */

        /* allocate an event cell */
        err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
                                file, mutexp);
        if (err < 0)
                return err;

        /* we got a cell. enqueue it. */
        if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
                snd_seq_cell_free(cell);
                return err;
        }

        return 0;
}

/*
 * check validity of event type and data length.
 * return non-zero if invalid.
 */
static int check_event_type_and_length(struct snd_seq_event *ev)
{
        switch (snd_seq_ev_length_type(ev)) {
        case SNDRV_SEQ_EVENT_LENGTH_FIXED:
                if (snd_seq_ev_is_variable_type(ev))
                        return -EINVAL;
                break;
        case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
                if (! snd_seq_ev_is_variable_type(ev) ||
                    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
                        return -EINVAL;
                break;
        case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
                if (! snd_seq_ev_is_direct(ev))
                        return -EINVAL;
                break;
        }
        return 0;
}

/* handle write() */
/* possible error values:
 *      -ENXIO  invalid client or file open mode
 *      -ENOMEM malloc failed
 *      -EFAULT seg. fault during copy from user space
 *      -EINVAL invalid event
 *      -EAGAIN no space in output pool
 *      -EINTR  interrupted while sleeping
 *      -EMLINK too many hops
 *      others  depend on return value from driver callback
 */
static ssize_t snd_seq_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *offset)
{
        struct snd_seq_client *client = file->private_data;
        int written = 0, len;
        int err, handled;
        struct snd_seq_event event;

        if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
                return -ENXIO;

        /* check client structures are in place */
        if (snd_BUG_ON(!client))
                return -ENXIO;

        if (!client->accept_output || client->pool == NULL)
                return -ENXIO;

 repeat:
        handled = 0;
        /* allocate the pool now if the pool is not allocated yet */
        mutex_lock(&client->ioctl_mutex);
        if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
                err = snd_seq_pool_init(client->pool);
                if (err < 0)
                        goto out;
        }

        /* only process whole events */
        err = -EINVAL;
        while (count >= sizeof(struct snd_seq_event)) {
                /* Read in the event header from the user */
                len = sizeof(event);
                if (copy_from_user(&event, buf, len)) {
                        err = -EFAULT;
                        break;
                }
                event.source.client = client->number;   /* fill in client number */
                /* Check for extension data length */
                if (check_event_type_and_length(&event)) {
                        err = -EINVAL;
                        break;
                }

                /* check for special events */
                if (event.type == SNDRV_SEQ_EVENT_NONE)
                        goto __skip_event;
                else if (snd_seq_ev_is_reserved(&event)) {
                        err = -EINVAL;
                        break;
                }

                if (snd_seq_ev_is_variable(&event)) {
                        int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
                        if ((size_t)(extlen + len) > count) {
                                /* back out, will get an error this time or next */
                                err = -EINVAL;
                                break;
                        }
                        /* set user space pointer */
                        event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
                        event.data.ext.ptr = (char __force *)buf
                                                + sizeof(struct snd_seq_event);
                        len += extlen; /* increment data length */
                } else {
#ifdef CONFIG_COMPAT
                        if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
                                void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]);
                                event.data.ext.ptr = ptr;
                        }
#endif
                }

                /* ok, enqueue it */
                err = snd_seq_client_enqueue_event(client, &event, file,
                                                   !(file->f_flags & O_NONBLOCK),
                                                   0, 0, &client->ioctl_mutex);
                if (err < 0)
                        break;
                handled++;

        __skip_event:
                /* Update pointers and counts */
                count -= len;
                buf += len;
                written += len;

                /* let's have a coffee break if too many events are queued */
                if (++handled >= 200) {
                        mutex_unlock(&client->ioctl_mutex);
                        goto repeat;
                }
        }

 out:
        mutex_unlock(&client->ioctl_mutex);
        return written ? written : err;
}
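
/*
 * Editorial illustration (not part of the original source): user space feeds
 * whole struct snd_seq_event records to write(); a variable-length event is
 * followed inline by its payload, which the loop above tags with
 * SNDRV_SEQ_EXT_USRPTR so it is copied lazily at delivery time.  A
 * hypothetical sender of one SysEx event might lay out its buffer as:
 *
 *      struct snd_seq_event ev = { .type = SNDRV_SEQ_EVENT_SYSEX, ... };
 *      ev.data.ext.len = payload_len;          // length without flag bits
 *      memcpy(buf, &ev, sizeof(ev));
 *      memcpy(buf + sizeof(ev), payload, payload_len);
 *      write(seq_fd, buf, sizeof(ev) + payload_len);
 *
 * Only a sketch; real applications normally go through alsa-lib instead.
 */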

/*
 * handle polling
 */
static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
{
        struct snd_seq_client *client = file->private_data;
        __poll_t mask = 0;

        /* check client structures are in place */
        if (snd_BUG_ON(!client))
                return EPOLLERR;

        if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
            client->data.user.fifo) {
                /* check if data is available in the outqueue */
                if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
                        mask |= EPOLLIN | EPOLLRDNORM;
        }

        if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
                /* check if data is available in the pool */
                if (!snd_seq_write_pool_allocated(client) ||
                    snd_seq_pool_poll_wait(client->pool, file, wait))
                        mask |= EPOLLOUT | EPOLLWRNORM;
        }

        return mask;
}

/*-----------------------------------------------------*/

static int snd_seq_ioctl_pversion(struct snd_seq_client *client, void *arg)
{
        int *pversion = arg;

        *pversion = SNDRV_SEQ_VERSION;
        return 0;
}

static int snd_seq_ioctl_client_id(struct snd_seq_client *client, void *arg)
{
        int *client_id = arg;

        *client_id = client->number;
        return 0;
}

/* SYSTEM_INFO ioctl() */
static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void *arg)
{
        struct snd_seq_system_info *info = arg;

        memset(info, 0, sizeof(*info));
        /* fill the info fields */
        info->queues = SNDRV_SEQ_MAX_QUEUES;
        info->clients = SNDRV_SEQ_MAX_CLIENTS;
        info->ports = SNDRV_SEQ_MAX_PORTS;
        info->channels = 256;   /* fixed limit */
        info->cur_clients = client_usage.cur;
        info->cur_queues = snd_seq_queue_get_cur_queues();

        return 0;
}

/* RUNNING_MODE ioctl() */
static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg)
{
        struct snd_seq_running_info *info = arg;
        struct snd_seq_client *cptr;
        int err = 0;

        /* requested client number */
        cptr = snd_seq_client_use_ptr(info->client);
        if (cptr == NULL)
                return -ENOENT;         /* don't change !!! */

#ifdef SNDRV_BIG_ENDIAN
        if (!info->big_endian) {
                err = -EINVAL;
                goto __err;
        }
#else
        if (info->big_endian) {
                err = -EINVAL;
                goto __err;
        }
#endif
        if (info->cpu_mode > sizeof(long)) {
                err = -EINVAL;
                goto __err;
        }
        cptr->convert32 = (info->cpu_mode < sizeof(long));
 __err:
        snd_seq_client_unlock(cptr);
        return err;
}

/* CLIENT_INFO ioctl() */
static void get_client_info(struct snd_seq_client *cptr,
                            struct snd_seq_client_info *info)
{
        info->client = cptr->number;

        /* fill the info fields */
        info->type = cptr->type;
        strcpy(info->name, cptr->name);
        info->filter = cptr->filter;
        info->event_lost = cptr->event_lost;
        memcpy(info->event_filter, cptr->event_filter, 32);
        info->num_ports = cptr->num_ports;

        if (cptr->type == USER_CLIENT)
                info->pid = pid_vnr(cptr->data.user.owner);
        else
                info->pid = -1;

        if (cptr->type == KERNEL_CLIENT)
                info->card = cptr->data.kernel.card ? cptr->data.kernel.card->number : -1;
        else
                info->card = -1;

        memset(info->reserved, 0, sizeof(info->reserved));
}

static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
                                         void *arg)
{
        struct snd_seq_client_info *client_info = arg;
        struct snd_seq_client *cptr;

        /* requested client number */
        cptr = snd_seq_client_use_ptr(client_info->client);
        if (cptr == NULL)
                return -ENOENT;         /* don't change !!! */

        get_client_info(cptr, client_info);
        snd_seq_client_unlock(cptr);

        return 0;
}

/* CLIENT_INFO ioctl() */
static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
                                         void *arg)
{
        struct snd_seq_client_info *client_info = arg;

        /* it is not allowed to set the info fields for another client */
        if (client->number != client_info->client)
                return -EPERM;
        /* also client type must be set now */
        if (client->type != client_info->type)
                return -EINVAL;

        /* fill the info fields */
        if (client_info->name[0])
                strscpy(client->name, client_info->name, sizeof(client->name));

        client->filter = client_info->filter;
        client->event_lost = client_info->event_lost;
        memcpy(client->event_filter, client_info->event_filter, 32);

        return 0;
}

/*
 * CREATE PORT ioctl()
 */
static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
{
        struct snd_seq_port_info *info = arg;
        struct snd_seq_client_port *port;
        struct snd_seq_port_callback *callback;
        int port_idx;

        /* it is not allowed to create the port for another client */
        if (info->addr.client != client->number)
                return -EPERM;

        port = snd_seq_create_port(client, (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info->addr.port : -1);
        if (port == NULL)
                return -ENOMEM;

        if (client->type == USER_CLIENT && info->kernel) {
                port_idx = port->addr.port;
                snd_seq_port_unlock(port);
                snd_seq_delete_port(client, port_idx);
                return -EINVAL;
        }
        if (client->type == KERNEL_CLIENT) {
                if ((callback = info->kernel) != NULL) {
                        if (callback->owner)
                                port->owner = callback->owner;
                        port->private_data = callback->private_data;
                        port->private_free = callback->private_free;
                        port->event_input = callback->event_input;
                        port->c_src.open = callback->subscribe;
                        port->c_src.close = callback->unsubscribe;
                        port->c_dest.open = callback->use;
                        port->c_dest.close = callback->unuse;
                }
        }

        info->addr = port->addr;

        snd_seq_set_port_info(port, info);
        snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
        snd_seq_port_unlock(port);

        return 0;
}
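
/*
 * Editorial illustration (not part of the original source): a kernel client
 * typically creates its port through this same ioctl path, passing the
 * callback block that the KERNEL_CLIENT branch above copies into the port.
 * A hedged sketch (field names as used above; the helper a driver uses to
 * submit the request varies):
 *
 *      struct snd_seq_port_callback pcb = {
 *              .owner        = THIS_MODULE,
 *              .private_data = chip,
 *              .event_input  = my_event_input,   // called on delivery
 *              .subscribe    = my_subscribe,
 *              .unsubscribe  = my_unsubscribe,
 *      };
 *
 * my_event_input(), my_subscribe(), my_unsubscribe() and chip are
 * hypothetical driver symbols used only for illustration.
 */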

/*
 * DELETE PORT ioctl()
 */
static int snd_seq_ioctl_delete_port(struct snd_seq_client *client, void *arg)
{
        struct snd_seq_port_info *info = arg;
        int err;

        /* it is not allowed to remove the port for another client */
        if (info->addr.client != client->number)
                return -EPERM;

        err = snd_seq_delete_port(client, info->addr.port);
        if (err >= 0)
                snd_seq_system_client_ev_port_exit(client->number, info->addr.port);
        return err;
}

/*
 * GET_PORT_INFO ioctl() (on any client)
 */
static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
{
        struct snd_seq_port_info *info = arg;
        struct snd_seq_client *cptr;
        struct snd_seq_client_port *port;

        cptr = snd_seq_client_use_ptr(info->addr.client);
        if (cptr == NULL)
                return -ENXIO;

        port = snd_seq_port_use_ptr(cptr, info->addr.port);
        if (port == NULL) {
                snd_seq_client_unlock(cptr);
                return -ENOENT;                 /* don't change */
        }

        /* get port info */
        snd_seq_get_port_info(port, info);
        snd_seq_port_unlock(port);
        snd_seq_client_unlock(cptr);

        return 0;
}

/*
 * SET_PORT_INFO ioctl() (only ports on this/own client)
 */
static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg)
{
        struct snd_seq_port_info *info = arg;
        struct snd_seq_client_port *port;

        if (info->addr.client != client->number) /* only set our own ports ! */
                return -EPERM;

        port = snd_seq_port_use_ptr(client, info->addr.port);
        if (port) {
                snd_seq_set_port_info(port, info);
                snd_seq_port_unlock(port);
        }

        return 0;
}

/*
 * port subscription (connection)
 */
#define PERM_RD         (SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
#define PERM_WR         (SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)

static int check_subscription_permission(struct snd_seq_client *client,
                                         struct snd_seq_client_port *sport,
                                         struct snd_seq_client_port *dport,
                                         struct snd_seq_port_subscribe *subs)
{
        if (client->number != subs->sender.client &&
            client->number != subs->dest.client) {
                /* connection by third client - check export permission */
                if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
                        return -EPERM;
                if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
                        return -EPERM;
        }

        /* check read permission */
        /* if sender or receiver is the subscribing client itself,
         * no permission check is necessary
         */
        if (client->number != subs->sender.client) {
                if (! check_port_perm(sport, PERM_RD))
                        return -EPERM;
        }

        /* check write permission */
        if (client->number != subs->dest.client) {
                if (! check_port_perm(dport, PERM_WR))
                        return -EPERM;
        }

        return 0;
}

/*
 * send a subscription notify event to user client:
 * client must be user client.
 */
int snd_seq_client_notify_subscription(int client, int port,
                                       struct snd_seq_port_subscribe *info,
                                       int evtype)
{
        struct snd_seq_event event;

        memset(&event, 0, sizeof(event));
        event.type = evtype;
        event.data.connect.dest = info->dest;
        event.data.connect.sender = info->sender;

        return snd_seq_system_notify(client, port, &event);  /* non-atomic */
}

/*
 * add to port's subscription list IOCTL interface
 */
static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
                                        void *arg)
{
        struct snd_seq_port_subscribe *subs = arg;
        int result = -EINVAL;
        struct snd_seq_client *receiver = NULL, *sender = NULL;
        struct snd_seq_client_port *sport = NULL, *dport = NULL;

        if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
                goto __end;
        if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
                goto __end;
        if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
                goto __end;
        if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
                goto __end;

        result = check_subscription_permission(client, sport, dport, subs);
        if (result < 0)
                goto __end;

        /* connect them */
        result = snd_seq_port_connect(client, sender, sport, receiver, dport, subs);
        if (! result) /* broadcast announce */
                snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
                                                   subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
      __end:
        if (sport)
                snd_seq_port_unlock(sport);
        if (dport)
                snd_seq_port_unlock(dport);
        if (sender)
                snd_seq_client_unlock(sender);
        if (receiver)
                snd_seq_client_unlock(receiver);
        return result;
}

/*
 * remove from port's subscription list
 */
static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
                                          void *arg)
{
        struct snd_seq_port_subscribe *subs = arg;
        int result = -ENXIO;
        struct snd_seq_client *receiver = NULL, *sender = NULL;
        struct snd_seq_client_port *sport = NULL, *dport = NULL;

        if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
                goto __end;
        if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
                goto __end;
        if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
                goto __end;
        if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
                goto __end;

        result = check_subscription_permission(client, sport, dport, subs);
        if (result < 0)
                goto __end;

        result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, subs);
        if (! result) /* broadcast announce */
                snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
                                                   subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
      __end:
        if (sport)
                snd_seq_port_unlock(sport);
        if (dport)
                snd_seq_port_unlock(dport);
        if (sender)
                snd_seq_client_unlock(sender);
        if (receiver)
                snd_seq_client_unlock(receiver);
        return result;
}

/* CREATE_QUEUE ioctl() */
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	info->queue = q->queue;
	info->locked = q->locked;
	info->owner = q->owner;

	/* set queue name */
	if (!info->name[0])
		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
	strscpy(q->name, info->name, sizeof(q->name));
	snd_use_lock_free(&q->use_lock);

	return 0;
}

/* DELETE_QUEUE ioctl() */
static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_queue_info *info = arg;

	return snd_seq_queue_delete(client->number, info->queue);
}

/* GET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = queueptr(info->queue);
	if (q == NULL)
		return -EINVAL;

	memset(info, 0, sizeof(*info));
	info->queue = q->queue;
	info->owner = q->owner;
	info->locked = q->locked;
	strscpy(info->name, q->name, sizeof(info->name));
	queuefree(q);

	return 0;
}

/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	if (info->owner != client->number)
		return -EINVAL;

	/* change owner/locked permission */
	if (snd_seq_queue_check_access(info->queue, client->number)) {
		if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0)
			return -EPERM;
		if (info->locked)
			snd_seq_queue_use(info->queue, client->number, 1);
	} else {
		return -EPERM;
	}

	q = queueptr(info->queue);
	if (! q)
		return -EINVAL;
	if (q->owner != client->number) {
		queuefree(q);
		return -EPERM;
	}
	strscpy(q->name, info->name, sizeof(q->name));
	queuefree(q);

	return 0;
}

/* GET_NAMED_QUEUE ioctl() */
static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = snd_seq_queue_find_name(info->name);
	if (q == NULL)
		return -EINVAL;
	info->queue = q->queue;
	info->owner = q->owner;
	info->locked = q->locked;
	queuefree(q);

	return 0;
}

/* GET_QUEUE_STATUS ioctl() */
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_status *status = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(status->queue);
	if (queue == NULL)
		return -EINVAL;
	memset(status, 0, sizeof(*status));
	status->queue = queue->queue;

	tmr = queue->timer;
	status->events = queue->tickq->cells + queue->timeq->cells;

	status->time = snd_seq_timer_get_cur_time(tmr, true);
	status->tick = snd_seq_timer_get_cur_tick(tmr);
	status->running = tmr->running;

	status->flags = queue->flags;
	queuefree(queue);

	return 0;
}

/* GET_QUEUE_TEMPO ioctl() */
static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_tempo *tempo = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(tempo->queue);
	if (queue == NULL)
		return -EINVAL;
	memset(tempo, 0, sizeof(*tempo));
	tempo->queue = queue->queue;

	tmr = queue->timer;
	tempo->tempo = tmr->tempo;
	tempo->ppq = tmr->ppq;
	tempo->skew_value = tmr->skew;
	tempo->skew_base = tmr->skew_base;
	queuefree(queue);

	return 0;
}

/* SET_QUEUE_TEMPO ioctl() */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
{
	if (!snd_seq_queue_check_access(tempo->queue, client))
		return -EPERM;
	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
}
EXPORT_SYMBOL(snd_seq_set_queue_tempo);
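
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a kernel client with access to a queue could change its tempo like this;
 * "my_client" and "my_queue" are hypothetical values obtained when the
 * client and queue were created. 500000 us per quarter note corresponds to
 * 120 BPM.
 *
 *	struct snd_seq_queue_tempo tempo;
 *
 *	memset(&tempo, 0, sizeof(tempo));
 *	tempo.queue = my_queue;
 *	tempo.tempo = 500000;
 *	tempo.ppq = 96;
 *	err = snd_seq_set_queue_tempo(my_client, &tempo);
 */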

static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_tempo *tempo = arg;
	int result;

	result = snd_seq_set_queue_tempo(client->number, tempo);
	return result < 0 ? result : 0;
}

/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_timer *timer = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(timer->queue);
	if (queue == NULL)
		return -EINVAL;
	mutex_lock(&queue->timer_mutex);
	tmr = queue->timer;
	memset(timer, 0, sizeof(*timer));
	timer->queue = queue->queue;

	timer->type = tmr->type;
	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
		timer->u.alsa.id = tmr->alsa_id;
		timer->u.alsa.resolution = tmr->preferred_resolution;
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);

	return 0;
}

/* SET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_timer *timer = arg;
	int result = 0;

	if (timer->type != SNDRV_SEQ_TIMER_ALSA)
		return -EINVAL;

	if (snd_seq_queue_check_access(timer->queue, client->number)) {
		struct snd_seq_queue *q;
		struct snd_seq_timer *tmr;

		q = queueptr(timer->queue);
		if (q == NULL)
			return -ENXIO;
		mutex_lock(&q->timer_mutex);
		tmr = q->timer;
		snd_seq_queue_timer_close(timer->queue);
		tmr->type = timer->type;
		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
			tmr->alsa_id = timer->u.alsa.id;
			tmr->preferred_resolution = timer->u.alsa.resolution;
		}
		result = snd_seq_queue_timer_open(timer->queue);
		mutex_unlock(&q->timer_mutex);
		queuefree(q);
	} else {
		return -EPERM;
	}

	return result;
}

/* GET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_client *info = arg;
	int used;

	used = snd_seq_queue_is_used(info->queue, client->number);
	if (used < 0)
		return -EINVAL;
	info->used = used;
	info->client = client->number;

	return 0;
}

/* SET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_client *info = arg;
	int err;

	if (info->used >= 0) {
		err = snd_seq_queue_use(info->queue, client->number, info->used);
		if (err < 0)
			return err;
	}

	return snd_seq_ioctl_get_queue_client(client, arg);
}

/* GET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_pool *info = arg;
	struct snd_seq_client *cptr;

	cptr = snd_seq_client_use_ptr(info->client);
	if (cptr == NULL)
		return -ENOENT;
	memset(info, 0, sizeof(*info));
	info->client = cptr->number;
	info->output_pool = cptr->pool->size;
	info->output_room = cptr->pool->room;
	info->output_free = info->output_pool;
	info->output_free = snd_seq_unused_cells(cptr->pool);
	if (cptr->type == USER_CLIENT) {
		info->input_pool = cptr->data.user.fifo_pool_size;
		info->input_free = info->input_pool;
		info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
	} else {
		info->input_pool = 0;
		info->input_free = 0;
	}
	snd_seq_client_unlock(cptr);

	return 0;
}

/* SET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_pool *info = arg;
	int rc;

	if (client->number != info->client)
		return -EINVAL; /* can't change other clients */

	if (info->output_pool >= 1 && info->output_pool <= SNDRV_SEQ_MAX_EVENTS &&
	    (! snd_seq_write_pool_allocated(client) ||
	     info->output_pool != client->pool->size)) {
		if (snd_seq_write_pool_allocated(client)) {
			/* is the pool in use? */
			if (atomic_read(&client->pool->counter))
				return -EBUSY;
			/* remove all existing cells */
			snd_seq_pool_mark_closing(client->pool);
			snd_seq_pool_done(client->pool);
		}
		client->pool->size = info->output_pool;
		rc = snd_seq_pool_init(client->pool);
		if (rc < 0)
			return rc;
	}
	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
	    info->input_pool >= 1 &&
	    info->input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
	    info->input_pool != client->data.user.fifo_pool_size) {
		/* change pool size */
		rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool);
		if (rc < 0)
			return rc;
		client->data.user.fifo_pool_size = info->input_pool;
	}
	if (info->output_room >= 1 &&
	    info->output_room <= client->pool->size) {
		client->pool->room = info->output_room;
	}

	return snd_seq_ioctl_get_client_pool(client, arg);
}

/* REMOVE_EVENTS ioctl() */
static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
				       void *arg)
{
	struct snd_seq_remove_events *info = arg;

	/*
	 * Input mostly not implemented XXX.
	 */
	if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/*
		 * No restrictions so for a user client we can clear
		 * the whole fifo
		 */
		if (client->type == USER_CLIENT && client->data.user.fifo)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, info);

	return 0;
}

/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
					       subs);
      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);

	return result;
}

/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_query_subs *subs = arg;
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if ((cptr = snd_seq_client_use_ptr(subs->root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs->root.port)) == NULL)
		goto __end;

	switch (subs->type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs->num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs->index) {
			/* found! */
			struct snd_seq_subscribers *s;
			if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs->addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs->addr = s->info.sender;
			}
			subs->flags = s->info.flags;
			subs->queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

      __end:
	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);

	return result;
}

/*
 * query next client
 */
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
					   void *arg)
{
	struct snd_seq_client_info *info = arg;
	struct snd_seq_client *cptr = NULL;

	/* search for next client */
	if (info->client < INT_MAX)
		info->client++;
	if (info->client < 0)
		info->client = 0;
	for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
		cptr = snd_seq_client_use_ptr(info->client);
		if (cptr)
			break; /* found */
	}
	if (cptr == NULL)
		return -ENOENT;

	get_client_info(cptr, info);
	snd_seq_client_unlock(cptr);

	return 0;
}

/*
 * query next port
 */
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port = NULL;

	cptr = snd_seq_client_use_ptr(info->addr.client);
	if (cptr == NULL)
		return -ENXIO;

	/* search for next port */
	info->addr.port++;
	port = snd_seq_port_query_nearest(cptr, info);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;
	}

	/* get port info */
	info->addr = port->addr;
	snd_seq_get_port_info(port, info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	return 0;
}

/* -------------------------------------------------------- */

static const struct ioctl_handler {
	unsigned int cmd;
	int (*func)(struct snd_seq_client *client, void *arg);
} ioctl_handlers[] = {
	{ SNDRV_SEQ_IOCTL_PVERSION, snd_seq_ioctl_pversion },
	{ SNDRV_SEQ_IOCTL_CLIENT_ID, snd_seq_ioctl_client_id },
	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
	{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
	{ 0, NULL },
};

static long snd_seq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;
	/* To use kernel stack for ioctl data. */
	union {
		int pversion;
		int client_id;
		struct snd_seq_system_info system_info;
		struct snd_seq_running_info running_info;
		struct snd_seq_client_info client_info;
		struct snd_seq_port_info port_info;
		struct snd_seq_port_subscribe port_subscribe;
		struct snd_seq_queue_info queue_info;
		struct snd_seq_queue_status queue_status;
		struct snd_seq_queue_tempo tempo;
		struct snd_seq_queue_timer queue_timer;
		struct snd_seq_queue_client queue_client;
		struct snd_seq_client_pool client_pool;
		struct snd_seq_remove_events remove_events;
		struct snd_seq_query_subs query_subs;
	} buf;
	const struct ioctl_handler *handler;
	unsigned long size;
	int err;

	if (snd_BUG_ON(!client))
		return -ENXIO;

	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
		if (handler->cmd == cmd)
			break;
	}
	if (handler->cmd == 0)
		return -ENOTTY;

	memset(&buf, 0, sizeof(buf));

	/*
	 * All ioctl commands for the ALSA sequencer take an argument whose
	 * size fits within 13 bits, so we can safely pick up the size from
	 * the command itself.
	 */
	size = _IOC_SIZE(handler->cmd);
	if (handler->cmd & IOC_IN) {
		if (copy_from_user(&buf, (const void __user *)arg, size))
			return -EFAULT;
	}

	mutex_lock(&client->ioctl_mutex);
	err = handler->func(client, &buf);
	mutex_unlock(&client->ioctl_mutex);

	if (err >= 0) {
		/*
		 * Some commands' ioctl numbers carry a bug in the 'dir'
		 * field, so copy the result back for them explicitly, too.
		 */
		if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
		    handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
		    (handler->cmd & IOC_OUT))
			if (copy_to_user((void __user *)arg, &buf, size))
				return -EFAULT;
	}

	return err;
}
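
/*
 * For reference (editor's sketch, not part of the original source): the
 * handlers above are reached from user space through the sequencer character
 * device registered at the bottom of this file, for example:
 *
 *	int fd = open("/dev/snd/seq", O_RDWR);
 *	int client_id;
 *
 *	if (fd >= 0 && ioctl(fd, SNDRV_SEQ_IOCTL_CLIENT_ID, &client_id) == 0)
 *		printf("assigned sequencer client: %d\n", client_id);
 *
 * Error handling is omitted; the device path may differ with the local
 * udev configuration.
 */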

#ifdef CONFIG_COMPAT
#include "seq_compat.c"
#else
#define snd_seq_ioctl_compat	NULL
#endif

/* -------------------------------------------------------- */

/* exported to kernel modules */
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
				 const char *name_fmt, ...)
{
	struct snd_seq_client *client;
	va_list args;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
		return -EINVAL;
	if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
		return -EINVAL;

	mutex_lock(&register_mutex);

	if (card) {
		client_index += SNDRV_SEQ_GLOBAL_CLIENTS
			+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
		if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
			client_index = -1;
	}

	/* empty write queue as default */
	client = seq_create_client1(client_index, 0);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -EBUSY;	/* failure code */
	}
	usage_alloc(&client_usage, 1);

	client->accept_input = 1;
	client->accept_output = 1;
	client->data.kernel.card = card;

	va_start(args, name_fmt);
	vsnprintf(client->name, sizeof(client->name), name_fmt, args);
	va_end(args);

	client->type = KERNEL_CLIENT;
	mutex_unlock(&register_mutex);

	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(client->number);

	/* return the client number to the caller */
	return client->number;
}
EXPORT_SYMBOL(snd_seq_create_kernel_client);
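
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a sound driver typically creates its kernel client at probe time and keeps
 * the returned client number around; "chip", "chip->card" and
 * "chip->seq_client" are hypothetical driver fields.
 *
 *	chip->seq_client = snd_seq_create_kernel_client(chip->card, 0,
 *							"My Synth %i",
 *							chip->card->number);
 *	if (chip->seq_client < 0)
 *		return chip->seq_client;
 *
 * The client is torn down again with snd_seq_delete_kernel_client() below.
 */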

/* exported to kernel modules */
int snd_seq_delete_kernel_client(int client)
{
	struct snd_seq_client *ptr;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	ptr = clientptr(client);
	if (ptr == NULL)
		return -EINVAL;

	seq_free_client(ptr);
	kfree(ptr);
	return 0;
}
EXPORT_SYMBOL(snd_seq_delete_kernel_client);

/*
 * skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
 * and snd_seq_kernel_client_enqueue_blocking
 */
static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
				 struct file *file, int blocking,
				 int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	if (ev->type == SNDRV_SEQ_EVENT_NONE)
		return 0; /* ignore this */
	if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
		return -EINVAL; /* quoted events can't be enqueued */

	/* fill in client number */
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (! cptr->accept_output)
		result = -EPERM;
	else /* send it */
		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
						      atomic, hop, NULL);

	snd_seq_client_unlock(cptr);
	return result;
}

/*
 * exported, called by kernel clients to enqueue events (without blocking)
 *
 * RETURN VALUE: zero on success, negative error code on failure
 */
int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
				  int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
}
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
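
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * scheduling a note-on event on a queue from a kernel client; "my_client",
 * "my_port", "my_queue" and "dest" are hypothetical values set up elsewhere.
 *
 *	struct snd_seq_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_NOTEON;
 *	ev.source.port = my_port;
 *	ev.dest = dest;
 *	ev.queue = my_queue;
 *	ev.data.note.channel = 0;
 *	ev.data.note.note = 60;
 *	ev.data.note.velocity = 127;
 *	err = snd_seq_kernel_client_enqueue(my_client, &ev, 0, 0);
 *
 * The source client number is filled in by kernel_client_enqueue() itself.
 */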

/*
 * exported, called by kernel clients to enqueue events (with blocking)
 *
 * RETURN VALUE: zero on success, negative error code on failure
 */
int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
					   struct file *file,
					   int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
}
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);

/*
 * exported, called by kernel clients to dispatch events directly to other
 * clients, bypassing the queues. Event time-stamp will be updated.
 *
 * RETURN VALUE: negative = delivery failed,
 *		 zero, or positive: the number of delivered events
 */
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
				   int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	/* fill in client number */
	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output)
		result = -EPERM;
	else
		result = snd_seq_deliver_event(cptr, ev, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}
EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
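
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * direct, unqueued delivery to all subscribers of a port. The queue field is
 * overwritten with SNDRV_SEQ_QUEUE_DIRECT by the function itself, so only the
 * destination needs to be filled in; "my_client" and "my_port" are
 * hypothetical, and atomic = 0 means the call may sleep.
 *
 *	ev.source.port = my_port;
 *	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
 *	err = snd_seq_kernel_client_dispatch(my_client, &ev, 0, 0);
 */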

/**
 * snd_seq_kernel_client_ctl - operate a command for a client with data in
 *			       kernel space.
 * @clientid: A numerical ID for a client.
 * @cmd: An ioctl(2) command for ALSA sequencer operation.
 * @arg: A pointer to data in kernel space.
 *
 * Despite its name, this kernel API can handle both kernel and application
 * (user) clients. The 'arg' pointer must point to data in kernel space.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
	const struct ioctl_handler *handler;
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
		if (handler->cmd == cmd)
			return handler->func(client, arg);
	}

	pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
		 cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
	return -ENOTTY;
}
EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
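
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a kernel client creating one of its own ports through the same handler
 * table, with the argument living in kernel space; "my_client" is
 * hypothetical.
 *
 *	struct snd_seq_port_info pinfo;
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.addr.client = my_client;
 *	strscpy(pinfo.name, "My Port", sizeof(pinfo.name));
 *	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
 *	pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC;
 *	err = snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT,
 *					&pinfo);
 */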

/* exported (for OSS emulator) */
int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
{
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	if (! snd_seq_write_pool_allocated(client))
		return 1;
	if (snd_seq_pool_poll_wait(client->pool, file, wait))
		return 1;
	return 0;
}
EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);

/*---------------------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/*
 *  /proc interface
 */
static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
					  struct snd_seq_port_subs_info *group,
					  int is_src, char *msg)
{
	struct list_head *p;
	struct snd_seq_subscribers *s;
	int count = 0;

	down_read(&group->list_mutex);
	if (list_empty(&group->list_head)) {
		up_read(&group->list_mutex);
		return;
	}
	snd_iprintf(buffer, msg);
	list_for_each(p, &group->list_head) {
		if (is_src)
			s = list_entry(p, struct snd_seq_subscribers, src_list);
		else
			s = list_entry(p, struct snd_seq_subscribers, dest_list);
		if (count++)
			snd_iprintf(buffer, ", ");
		snd_iprintf(buffer, "%d:%d",
			    is_src ? s->info.dest.client : s->info.sender.client,
			    is_src ? s->info.dest.port : s->info.sender.port);
		if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
		if (group->exclusive)
			snd_iprintf(buffer, "[ex]");
	}
	up_read(&group->list_mutex);
	snd_iprintf(buffer, "\n");
}

#define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
#define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
#define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')

#define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')

static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
				    struct snd_seq_client *client)
{
	struct snd_seq_client_port *p;

	mutex_lock(&client->ports_mutex);
	list_for_each_entry(p, &client->ports_list_head, list) {
		snd_iprintf(buffer, "  Port %3d : \"%s\" (%c%c%c%c)\n",
			    p->addr.port, p->name,
			    FLAG_PERM_RD(p->capability),
			    FLAG_PERM_WR(p->capability),
			    FLAG_PERM_EX(p->capability),
			    FLAG_PERM_DUPLEX(p->capability));
		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
	}
	mutex_unlock(&client->ports_mutex);
}

/* exported to seq_info.c */
void snd_seq_info_clients_read(struct snd_info_entry *entry,
			       struct snd_info_buffer *buffer)
{
	int c;
	struct snd_seq_client *client;

	snd_iprintf(buffer, "Client info\n");
	snd_iprintf(buffer, "  cur  clients : %d\n", client_usage.cur);
	snd_iprintf(buffer, "  peak clients : %d\n", client_usage.peak);
	snd_iprintf(buffer, "  max  clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
	snd_iprintf(buffer, "\n");

	/* list the client table */
	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
		client = snd_seq_client_use_ptr(c);
		if (client == NULL)
			continue;
		if (client->type == NO_CLIENT) {
			snd_seq_client_unlock(client);
			continue;
		}

		snd_iprintf(buffer, "Client %3d : \"%s\" [%s]\n",
			    c, client->name,
			    client->type == USER_CLIENT ? "User" : "Kernel");
		snd_seq_info_dump_ports(buffer, client);
		if (snd_seq_write_pool_allocated(client)) {
			snd_iprintf(buffer, "  Output pool :\n");
			snd_seq_info_pool(buffer, client->pool, "    ");
		}
		if (client->type == USER_CLIENT && client->data.user.fifo &&
		    client->data.user.fifo->pool) {
			snd_iprintf(buffer, "  Input pool :\n");
			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
		}
		snd_seq_client_unlock(client);
	}
}
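
/*
 * The dump helpers above produce /proc/asound/seq/clients output roughly
 * along these lines (editor's illustration; the client and port names below
 * are examples only):
 *
 *	Client info
 *	  cur  clients : 2
 *	  peak clients : 2
 *	  max  clients : 192
 *
 *	Client   0 : "System" [Kernel]
 *	  Port   0 : "Timer" (Rwe-)
 *	  Port   1 : "Announce" (R-e-)
 */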

#endif /* CONFIG_SND_PROC_FS */

/*---------------------------------------------------------------------------*/

/*
 *  REGISTRATION PART
 */
static const struct file_operations snd_seq_f_ops =
{
	.owner = THIS_MODULE,
	.read = snd_seq_read,
	.write = snd_seq_write,
	.open = snd_seq_open,
	.release = snd_seq_release,
	.llseek = no_llseek,
	.poll = snd_seq_poll,
	.unlocked_ioctl = snd_seq_ioctl,
	.compat_ioctl = snd_seq_ioctl_compat,
};

static struct device seq_dev;

/*
 * register sequencer device
 */
int __init snd_sequencer_device_init(void)
{
	int err;

	snd_device_initialize(&seq_dev, NULL);
	dev_set_name(&seq_dev, "seq");

	mutex_lock(&register_mutex);
	err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
				  &snd_seq_f_ops, NULL, &seq_dev);
	mutex_unlock(&register_mutex);
	if (err < 0) {
		put_device(&seq_dev);
		return err;
	}

	return 0;
}

/*
 * unregister sequencer device
 */
void snd_sequencer_device_done(void)
{
	snd_unregister_device(&seq_dev);
	put_device(&seq_dev);
}