lx_core.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* -*- linux-c -*- *
  3. *
  4. * ALSA driver for the digigram lx6464es interface
  5. * low-level interface
  6. *
  7. * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
  8. */
  9. /* #define RMH_DEBUG 1 */
  10. #include <linux/bitops.h>
  11. #include <linux/module.h>
  12. #include <linux/pci.h>
  13. #include <linux/delay.h>
  14. #include "lx6464es.h"
  15. #include "lx_core.h"
  16. /* low-level register access */
/* Offsets of the DSP registers, indexed by the eReg_* port enum.
 * lx_dsp_register() multiplies these by 4 (32-bit word size) to get
 * a byte offset from the DSP BAR. */
static const unsigned long dsp_port_offsets[] = {
	0,
	0x400,
	0x401,
	0x402,
	0x403,
	0x404,
	0x405,
	0x406,
	0x407,
	0x408,
	0x409,
	0x40a,
	0x40b,
	0x40c,
	0x410,
	0x411,
	0x412,
	0x413,
	0x414,
	0x415,
	0x416,
	0x420,
	0x430,
	0x431,
	0x432,
	0x433,
	0x434,
	0x440
};
  47. static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
  48. {
  49. void __iomem *base_address = chip->port_dsp_bar;
  50. return base_address + dsp_port_offsets[port]*4;
  51. }
  52. unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
  53. {
  54. void __iomem *address = lx_dsp_register(chip, port);
  55. return ioread32(address);
  56. }
  57. static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
  58. u32 len)
  59. {
  60. u32 __iomem *address = lx_dsp_register(chip, port);
  61. int i;
  62. /* we cannot use memcpy_fromio */
  63. for (i = 0; i != len; ++i)
  64. data[i] = ioread32(address + i);
  65. }
  66. void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
  67. {
  68. void __iomem *address = lx_dsp_register(chip, port);
  69. iowrite32(data, address);
  70. }
  71. static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
  72. const u32 *data, u32 len)
  73. {
  74. u32 __iomem *address = lx_dsp_register(chip, port);
  75. int i;
  76. /* we cannot use memcpy_to */
  77. for (i = 0; i != len; ++i)
  78. iowrite32(data[i], address + i);
  79. }
/* Byte offsets of the PLX bridge registers, indexed by the ePLX_*
 * port enum; used directly (no scaling) by lx_plx_register(). */
static const unsigned long plx_port_offsets[] = {
	0x04,
	0x40,
	0x44,
	0x48,
	0x4c,
	0x50,
	0x54,
	0x58,
	0x5c,
	0x64,
	0x68,
	0x6C
};
  94. static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
  95. {
  96. void __iomem *base_address = chip->port_plx_remapped;
  97. return base_address + plx_port_offsets[port];
  98. }
  99. unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
  100. {
  101. void __iomem *address = lx_plx_register(chip, port);
  102. return ioread32(address);
  103. }
  104. void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
  105. {
  106. void __iomem *address = lx_plx_register(chip, port);
  107. iowrite32(data, address);
  108. }
  109. /* rmh */
  110. #ifdef CONFIG_SND_DEBUG
  111. #define CMD_NAME(a) a
  112. #else
  113. #define CMD_NAME(a) NULL
  114. #endif
  115. #define Reg_CSM_MR 0x00000002
  116. #define Reg_CSM_MC 0x00000001
/* Descriptor for one MicroBlaze mailbox command: opcode, command
 * length, and the shape of the expected status reply.  One entry per
 * cmd_mb_opcodes value in the dsp_commands[] table below. */
struct dsp_cmd_info {
	u32 dcCodeOp;		/* Op Code of the command (usually 1st 24-bits
				 * word).*/
	u16 dcCmdLength;	/* Command length in words of 24 bits.*/
	u16 dcStatusType;	/* Status type: 0 for fixed length, 1 for
				 * random. */
	u16 dcStatusLength;	/* Status length (if fixed).*/
	char *dcOpName;		/* Human-readable name (NULL unless
				 * CONFIG_SND_DEBUG). */
};
  126. /*
  127. Initialization and control data for the Microblaze interface
  128. - OpCode:
  129. the opcode field of the command set at the proper offset
  130. - CmdLength
  131. the number of command words
  132. - StatusType
  133. offset in the status registers: 0 means that the return value may be
  134. different from 0, and must be read
  135. - StatusLength
  136. the number of status words (in addition to the return value)
  137. */
/* Command descriptor table, indexed by enum cmd_mb_opcodes; consumed by
 * lx_message_init().  See the block comment above for field meanings. */
static const struct dsp_cmd_info dsp_commands[] =
{
	{ (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
	  , 1 , 0 /**/ , CMD_NAME("INFO_DEBUG") },
	{ (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /**/ , CMD_NAME("GET_SYS_CFG") },
	{ (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_GRANULARITY") },
	{ (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_TIMER_IRQ") },
	{ (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /*up to 10*/ , CMD_NAME("GET_EVENT") },
	{ (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /*up to 4*/ , CMD_NAME("GET_PIPES") },
	{ (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/ , CMD_NAME("ALLOCATE_PIPE") },
	{ (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/ , CMD_NAME("RELEASE_PIPE") },
	{ (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
	  , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") },
	{ (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /*up to 2*/ , CMD_NAME("STOP_PIPE") },
	{ (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 2*/ , CMD_NAME("GET_PIPE_SPL_COUNT") },
	{ (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
	  , 1 , 0 /**/ , CMD_NAME("TOGGLE_PIPE_STATE") },
	{ (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
	  , 1 , 0 /**/ , CMD_NAME("DEF_STREAM") },
	{ (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_MUTE") },
	{ (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1/**/
	  , 1 , 2 /**/ , CMD_NAME("GET_STREAM_SPL_COUNT") },
	{ (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
	  , 0 , 1 /**/ , CMD_NAME("UPDATE_BUFFER") },
	{ (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 4 /**/ , CMD_NAME("GET_BUFFER") },
	{ (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 4*/ , CMD_NAME("CANCEL_BUFFER") },
	{ (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /**/ , CMD_NAME("GET_PEAK") },
	{ (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/ , CMD_NAME("SET_STREAM_STATE") },
};
  181. static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
  182. {
  183. snd_BUG_ON(cmd >= CMD_14_INVALID);
  184. rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
  185. rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
  186. rmh->stat_len = dsp_commands[cmd].dcStatusLength;
  187. rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
  188. rmh->cmd_idx = cmd;
  189. memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));
  190. #ifdef CONFIG_SND_DEBUG
  191. memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
  192. #endif
  193. #ifdef RMH_DEBUG
  194. rmh->cmd_idx = cmd;
  195. #endif
  196. }
#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
/* Dump an rmh's command and status words to the kernel log; compiled
 * only when RMH_DEBUG is defined at the top of this file. */
static void lx_message_dump(struct lx_rmh *rmh)
{
	u8 idx = rmh->cmd_idx;
	int i;

	pr_debug(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

	for (i = 0; i != rmh->cmd_len; ++i)
		pr_debug(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

	for (i = 0; i != rmh->stat_len; ++i)
		pr_debug(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
	pr_debug("\n");
}
#else
/* no-op stub when RMH_DEBUG is disabled */
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif
  214. /* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
  215. #define XILINX_TIMEOUT_MS 40
  216. #define XILINX_POLL_NO_SLEEP 100
  217. #define XILINX_POLL_ITERATIONS 150
/*
 * Send one rmh command to the MicroBlaze through the CSM/CRM mailbox
 * registers and busy-wait (udelay) for the answer — safe in atomic
 * context.  All callers in this file hold chip->msg_lock around it.
 *
 * Returns 0 on success, a positive DSP error word, -EBUSY if the
 * mailbox is still busy, -ETIMEDOUT or -EAGAIN if the DSP reported
 * timeout/crash.
 */
static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
	u32 reg = ED_DSP_TIMED_OUT;
	int dwloop;

	/* mailbox must be idle (neither command nor response pending) */
	if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
		/* NOTE(review): this prints 'reg' (still ED_DSP_TIMED_OUT),
		 * not the CSM value that was just read — verify intent */
		dev_err(chip->card->dev, "PIOSendMessage eReg_CSM %x\n", reg);
		return -EBUSY;
	}

	/* write command */
	lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

	/* MicoBlaze gogogo */
	lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

	/* wait for device to answer (up to XILINX_TIMEOUT_MS, 1us steps) */
	for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
		if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
			/* dsp_stat == 0 means the return value must be read */
			if (rmh->dsp_stat == 0)
				reg = lx_dsp_reg_read(chip, eReg_CRM1);
			else
				reg = 0;
			goto polling_successful;
		} else
			udelay(1);
	}
	dev_warn(chip->card->dev, "TIMEOUT lx_message_send_atomic! "
		   "polling failed\n");

polling_successful:
	if ((reg & ERROR_VALUE) == 0) {
		/* read response */
		if (rmh->stat_len) {
			snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
			lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
					   rmh->stat_len);
		}
	} else
		dev_err(chip->card->dev, "rmh error: %08x\n", reg);

	/* clear Reg_CSM_MR */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	/* translate the DSP's own error words into errno codes */
	switch (reg) {
	case ED_DSP_TIMED_OUT:
		dev_warn(chip->card->dev, "lx_message_send: dsp timeout\n");
		return -ETIMEDOUT;

	case ED_DSP_CRASHED:
		dev_warn(chip->card->dev, "lx_message_send: dsp crashed\n");
		return -EAGAIN;
	}

	lx_message_dump(rmh);
	return reg;
}
  266. /* low-level dsp access */
  267. int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
  268. {
  269. u16 ret;
  270. mutex_lock(&chip->msg_lock);
  271. lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
  272. ret = lx_message_send_atomic(chip, &chip->rmh);
  273. *rdsp_version = chip->rmh.stat[1];
  274. mutex_unlock(&chip->msg_lock);
  275. return ret;
  276. }
  277. int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
  278. {
  279. u16 ret = 0;
  280. u32 freq_raw = 0;
  281. u32 freq = 0;
  282. u32 frequency = 0;
  283. mutex_lock(&chip->msg_lock);
  284. lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
  285. ret = lx_message_send_atomic(chip, &chip->rmh);
  286. if (ret == 0) {
  287. freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
  288. freq = freq_raw & XES_FREQ_COUNT8_MASK;
  289. if ((freq < XES_FREQ_COUNT8_48_MAX) ||
  290. (freq > XES_FREQ_COUNT8_44_MIN))
  291. frequency = 0; /* unknown */
  292. else if (freq >= XES_FREQ_COUNT8_44_MAX)
  293. frequency = 44100;
  294. else
  295. frequency = 48000;
  296. }
  297. mutex_unlock(&chip->msg_lock);
  298. *rfreq = frequency * chip->freq_ratio;
  299. return ret;
  300. }
/* Read the 48-bit MAC address from the two 24-bit DSP ID registers
 * into chip->mac_address[] (most significant byte first).  Always
 * returns 0. */
int lx_dsp_get_mac(struct lx6464es *chip)
{
	u32 macmsb, maclsb;

	macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
	maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

	/* todo: endianess handling */
	/* NOTE(review): byte-pointer access into a u32 is host-endian
	 * dependent; the ordering below looks little-endian-specific —
	 * confirm on big-endian targets */
	chip->mac_address[5] = ((u8 *)(&maclsb))[0];
	chip->mac_address[4] = ((u8 *)(&maclsb))[1];
	chip->mac_address[3] = ((u8 *)(&maclsb))[2];
	chip->mac_address[2] = ((u8 *)(&macmsb))[0];
	chip->mac_address[1] = ((u8 *)(&macmsb))[1];
	chip->mac_address[0] = ((u8 *)(&macmsb))[2];

	return 0;
}
  315. int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
  316. {
  317. int ret;
  318. mutex_lock(&chip->msg_lock);
  319. lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
  320. chip->rmh.cmd[0] |= gran;
  321. ret = lx_message_send_atomic(chip, &chip->rmh);
  322. mutex_unlock(&chip->msg_lock);
  323. return ret;
  324. }
  325. int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
  326. {
  327. int ret;
  328. mutex_lock(&chip->msg_lock);
  329. lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
  330. chip->rmh.stat_len = 9; /* we don't necessarily need the full length */
  331. ret = lx_message_send_atomic(chip, &chip->rmh);
  332. if (!ret)
  333. memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));
  334. mutex_unlock(&chip->msg_lock);
  335. return ret;
  336. }
  337. #define PIPE_INFO_TO_CMD(capture, pipe) \
  338. ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)
  339. /* low-level pipe handling */
  340. int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
  341. int channels)
  342. {
  343. int err;
  344. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  345. mutex_lock(&chip->msg_lock);
  346. lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);
  347. chip->rmh.cmd[0] |= pipe_cmd;
  348. chip->rmh.cmd[0] |= channels;
  349. err = lx_message_send_atomic(chip, &chip->rmh);
  350. mutex_unlock(&chip->msg_lock);
  351. if (err != 0)
  352. dev_err(chip->card->dev, "could not allocate pipe\n");
  353. return err;
  354. }
  355. int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
  356. {
  357. int err;
  358. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  359. mutex_lock(&chip->msg_lock);
  360. lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);
  361. chip->rmh.cmd[0] |= pipe_cmd;
  362. err = lx_message_send_atomic(chip, &chip->rmh);
  363. mutex_unlock(&chip->msg_lock);
  364. return err;
  365. }
/*
 * Query the board's buffer state for a pipe (CMD_08_ASK_BUFFERS).
 * On success *r_needed is the number of free buffer slots, *r_freed
 * the number of finished (end-of-buffer) slots, and size_array (if
 * non-NULL) receives the data size of each finished slot.
 */
int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
		  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
	if (size_array)
		memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

	*r_needed = 0;
	*r_freed = 0;

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (!err) {
		int i;
		for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
			u32 stat = chip->rmh.stat[i];
			if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
				/* finished */
				*r_freed += 1;
				if (size_array)
					size_array[i] = stat & MASK_DATA_SIZE;
			} else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
				   == 0)
				/* free */
				*r_needed += 1;
		}

		dev_dbg(chip->card->dev,
			"CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
			*r_needed, *r_freed);
		for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len;
		     ++i) {
			dev_dbg(chip->card->dev, "  stat[%d]: %x, %x\n", i,
				chip->rmh.stat[i],
				chip->rmh.stat[i] & MASK_DATA_SIZE);
		}
	}

	mutex_unlock(&chip->msg_lock);
	return err;
}
  408. int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
  409. {
  410. int err;
  411. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  412. mutex_lock(&chip->msg_lock);
  413. lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);
  414. chip->rmh.cmd[0] |= pipe_cmd;
  415. err = lx_message_send_atomic(chip, &chip->rmh);
  416. mutex_unlock(&chip->msg_lock);
  417. return err;
  418. }
  419. static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
  420. {
  421. int err;
  422. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  423. mutex_lock(&chip->msg_lock);
  424. lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);
  425. chip->rmh.cmd[0] |= pipe_cmd;
  426. err = lx_message_send_atomic(chip, &chip->rmh);
  427. mutex_unlock(&chip->msg_lock);
  428. return err;
  429. }
  430. int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
  431. {
  432. int err;
  433. err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
  434. if (err < 0)
  435. return err;
  436. err = lx_pipe_toggle_state(chip, pipe, is_capture);
  437. return err;
  438. }
  439. int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
  440. {
  441. int err = 0;
  442. err = lx_pipe_wait_for_start(chip, pipe, is_capture);
  443. if (err < 0)
  444. return err;
  445. err = lx_pipe_toggle_state(chip, pipe, is_capture);
  446. return err;
  447. }
  448. int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
  449. u64 *rsample_count)
  450. {
  451. int err;
  452. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  453. mutex_lock(&chip->msg_lock);
  454. lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);
  455. chip->rmh.cmd[0] |= pipe_cmd;
  456. chip->rmh.stat_len = 2; /* need all words here! */
  457. err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */
  458. if (err != 0)
  459. dev_err(chip->card->dev,
  460. "could not query pipe's sample count\n");
  461. else {
  462. *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
  463. << 24) /* hi part */
  464. + chip->rmh.stat[1]; /* lo part */
  465. }
  466. mutex_unlock(&chip->msg_lock);
  467. return err;
  468. }
  469. int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
  470. {
  471. int err;
  472. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  473. mutex_lock(&chip->msg_lock);
  474. lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);
  475. chip->rmh.cmd[0] |= pipe_cmd;
  476. err = lx_message_send_atomic(chip, &chip->rmh);
  477. if (err != 0)
  478. dev_err(chip->card->dev, "could not query pipe's state\n");
  479. else
  480. *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;
  481. mutex_unlock(&chip->msg_lock);
  482. return err;
  483. }
  484. static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
  485. int is_capture, u16 state)
  486. {
  487. int i;
  488. /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
  489. * timeout 50 ms */
  490. for (i = 0; i != 50; ++i) {
  491. u16 current_state;
  492. int err = lx_pipe_state(chip, pipe, is_capture, &current_state);
  493. if (err < 0)
  494. return err;
  495. if (!err && current_state == state)
  496. return 0;
  497. mdelay(1);
  498. }
  499. return -ETIMEDOUT;
  500. }
/* Wait until the pipe is in the running state (PSTATE_RUN). */
int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}
/* Wait until the pipe is in the idle state (PSTATE_IDLE). */
int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}
  509. /* low-level stream handling */
  510. int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
  511. int is_capture, enum stream_state_t state)
  512. {
  513. int err;
  514. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  515. mutex_lock(&chip->msg_lock);
  516. lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);
  517. chip->rmh.cmd[0] |= pipe_cmd;
  518. chip->rmh.cmd[0] |= state;
  519. err = lx_message_send_atomic(chip, &chip->rmh);
  520. mutex_unlock(&chip->msg_lock);
  521. return err;
  522. }
  523. int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
  524. u32 pipe, int is_capture)
  525. {
  526. int err;
  527. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  528. u32 channels = runtime->channels;
  529. mutex_lock(&chip->msg_lock);
  530. lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);
  531. chip->rmh.cmd[0] |= pipe_cmd;
  532. if (runtime->sample_bits == 16)
  533. /* 16 bit format */
  534. chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);
  535. if (snd_pcm_format_little_endian(runtime->format))
  536. /* little endian/intel format */
  537. chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);
  538. chip->rmh.cmd[0] |= channels-1;
  539. err = lx_message_send_atomic(chip, &chip->rmh);
  540. mutex_unlock(&chip->msg_lock);
  541. return err;
  542. }
  543. int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
  544. int *rstate)
  545. {
  546. int err;
  547. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  548. mutex_lock(&chip->msg_lock);
  549. lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);
  550. chip->rmh.cmd[0] |= pipe_cmd;
  551. err = lx_message_send_atomic(chip, &chip->rmh);
  552. *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;
  553. mutex_unlock(&chip->msg_lock);
  554. return err;
  555. }
/* Read a stream's sample position (CMD_0E_GET_STREAM_SPL_COUNT) into
 * *r_bytepos: hi part from stat[0], lo part from stat[1]. */
int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
			      u64 *r_bytepos)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	/* NOTE(review): hi part is shifted by 32 here while
	 * lx_pipe_sample_count() shifts its hi part by 24 — confirm the
	 * intended stream-counter layout */
	*r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
		      << 32)	     /* hi part */
		+ chip->rmh.stat[1]; /* lo part */

	mutex_unlock(&chip->msg_lock);
	return err;
}
  571. /* low-level buffer handling */
/*
 * Hand one DMA buffer to the board (CMD_0F_UPDATE_BUFFER) and request
 * an end-of-buffer interrupt for it.  On success *r_buffer_index
 * receives the board-assigned buffer slot.  Returns 0, a positive
 * DSP error word (EB_*), or a negative errno.
 */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
		   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
		   u32 *r_buffer_index)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	mutex_lock(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

	/* todo: pause request, circular buffer */

	chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
	chip->rmh.cmd[2] = buf_address_lo;

	/* a high dword extends the command to the 64-bit address form */
	if (buf_address_hi) {
		chip->rmh.cmd_len = 4;
		chip->rmh.cmd[3] = buf_address_hi;
		chip->rmh.cmd[0] |= BF_64BITS_ADR;
	}

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err == 0) {
		*r_buffer_index = chip->rmh.stat[0];
		goto done;
	}

	/* log the recognizable DSP error words before returning them */
	if (err == EB_RBUFFERS_TABLE_OVERFLOW)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

	if (err == EB_INVALID_STREAM)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_INVALID_STREAM\n");

	if (err == EB_CMD_REFUSED)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_CMD_REFUSED\n");

 done:
	mutex_unlock(&chip->msg_lock);
	return err;
}
  608. int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
  609. u32 *r_buffer_size)
  610. {
  611. int err;
  612. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  613. mutex_lock(&chip->msg_lock);
  614. lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
  615. chip->rmh.cmd[0] |= pipe_cmd;
  616. chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
  617. * microblaze will seek for it */
  618. err = lx_message_send_atomic(chip, &chip->rmh);
  619. if (err == 0)
  620. *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;
  621. mutex_unlock(&chip->msg_lock);
  622. return err;
  623. }
  624. int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
  625. u32 buffer_index)
  626. {
  627. int err;
  628. u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
  629. mutex_lock(&chip->msg_lock);
  630. lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
  631. chip->rmh.cmd[0] |= pipe_cmd;
  632. chip->rmh.cmd[0] |= buffer_index;
  633. err = lx_message_send_atomic(chip, &chip->rmh);
  634. mutex_unlock(&chip->msg_lock);
  635. return err;
  636. }
  637. /* low-level gain/peak handling
  638. *
  639. * \todo: can we unmute capture/playback channels independently?
  640. *
  641. * */
  642. int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
  643. {
  644. int err;
  645. /* bit set to 1: channel muted */
  646. u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;
  647. mutex_lock(&chip->msg_lock);
  648. lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);
  649. chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);
  650. chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32); /* hi part */
  651. chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */
  652. dev_dbg(chip->card->dev,
  653. "mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
  654. chip->rmh.cmd[2]);
  655. err = lx_message_send_atomic(chip, &chip->rmh);
  656. mutex_unlock(&chip->msg_lock);
  657. return err;
  658. }
/* Translation table from the board's 4-bit peak codes (see
 * lx_level_peaks) to 24-bit linear amplitudes; the dB values in the
 * comments describe each step relative to full scale. */
static const u32 peak_map[] = {
	0x00000109, /* -90.308dB */
	0x0000083B, /* -72.247dB */
	0x000020C4, /* -60.205dB */
	0x00008273, /* -48.030dB */
	0x00020756, /* -36.005dB */
	0x00040C37, /* -30.001dB */
	0x00081385, /* -24.002dB */
	0x00101D3F, /* -18.000dB */
	0x0016C310, /* -15.000dB */
	0x002026F2, /* -12.001dB */
	0x002D6A86, /* -9.000dB */
	0x004026E6, /* -6.004dB */
	0x005A9DF6, /* -3.000dB */
	0x0065AC8B, /* -2.000dB */
	0x00721481, /* -1.000dB */
	0x007FFFFF, /* FS */
};
  677. int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
  678. u32 *r_levels)
  679. {
  680. int err = 0;
  681. int i;
  682. mutex_lock(&chip->msg_lock);
  683. for (i = 0; i < channels; i += 4) {
  684. u32 s0, s1, s2, s3;
  685. lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
  686. chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);
  687. err = lx_message_send_atomic(chip, &chip->rmh);
  688. if (err == 0) {
  689. s0 = peak_map[chip->rmh.stat[0] & 0x0F];
  690. s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
  691. s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
  692. s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
  693. } else
  694. s0 = s1 = s2 = s3 = 0;
  695. r_levels[0] = s0;
  696. r_levels[1] = s1;
  697. r_levels[2] = s2;
  698. r_levels[3] = s3;
  699. r_levels += 4;
  700. }
  701. mutex_unlock(&chip->msg_lock);
  702. return err;
  703. }
  704. /* interrupt handling */
  705. #define PCX_IRQ_NONE 0
  706. #define IRQCS_ACTIVE_PCIDB BIT(13)
  707. #define IRQCS_ENABLE_PCIIRQ BIT(8)
  708. #define IRQCS_ENABLE_PCIDB BIT(9)
  709. static u32 lx_interrupt_test_ack(struct lx6464es *chip)
  710. {
  711. u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);
  712. /* Test if PCI Doorbell interrupt is active */
  713. if (irqcs & IRQCS_ACTIVE_PCIDB) {
  714. u32 temp;
  715. irqcs = PCX_IRQ_NONE;
  716. while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
  717. /* RAZ interrupt */
  718. irqcs |= temp;
  719. lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
  720. }
  721. return irqcs;
  722. }
  723. return PCX_IRQ_NONE;
  724. }
  725. static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
  726. int *r_async_pending, int *r_async_escmd)
  727. {
  728. u32 irq_async;
  729. u32 irqsrc = lx_interrupt_test_ack(chip);
  730. if (irqsrc == PCX_IRQ_NONE)
  731. return 0;
  732. *r_irqsrc = irqsrc;
  733. irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
  734. * (set by xilinx) + EOB */
  735. if (irq_async & MASK_SYS_STATUS_ESA) {
  736. irq_async &= ~MASK_SYS_STATUS_ESA;
  737. *r_async_escmd = 1;
  738. }
  739. if (irq_async) {
  740. /* dev_dbg(chip->card->dev, "interrupt: async event pending\n"); */
  741. *r_async_pending = 1;
  742. }
  743. return 1;
  744. }
/*
 * Fetch and decode the board's asynchronous events after an interrupt:
 * reports a frequency change and, per direction, the 64-bit masks of
 * pipes with a finished (end-of-buffer) buffer.  Returns the result of
 * lx_dsp_read_async_events().
 */
static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
					    int *r_freq_changed,
					    u64 *r_notified_in_pipe_mask,
					    u64 *r_notified_out_pipe_mask)
{
	int err;
	u32 stat[9];		/* answer from CMD_04_GET_EVENT */

	/* We can optimize this to not read dumb events.
	 * Answer words are in the following order:
	 * Stat[0]	general status
	 * Stat[1]	end of buffer OUT pF
	 * Stat[2]	end of buffer OUT pf
	 * Stat[3]	end of buffer IN pF
	 * Stat[4]	end of buffer IN pf
	 * Stat[5]	MSB underrun
	 * Stat[6]	LSB underrun
	 * Stat[7]	MSB overrun
	 * Stat[8]	LSB overrun
	 * */

	int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
	int eb_pending_in  = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

	*r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

	err = lx_dsp_read_async_events(chip, stat);
	if (err < 0)
		return err;

	if (eb_pending_in) {
		/* capture: hi mask in Stat[3], lo mask in Stat[4] */
		*r_notified_in_pipe_mask = ((u64)stat[3] << 32)
			+ stat[4];
		dev_dbg(chip->card->dev, "interrupt: EOBI pending %llx\n",
			    *r_notified_in_pipe_mask);
	}

	if (eb_pending_out) {
		/* playback: hi mask in Stat[1], lo mask in Stat[2] */
		*r_notified_out_pipe_mask = ((u64)stat[1] << 32)
			+ stat[2];
		dev_dbg(chip->card->dev, "interrupt: EOBO pending %llx\n",
			    *r_notified_out_pipe_mask);
	}

	/* todo: handle xrun notification */

	return err;
}
  785. static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
  786. struct lx_stream *lx_stream)
  787. {
  788. struct snd_pcm_substream *substream = lx_stream->stream;
  789. const unsigned int is_capture = lx_stream->is_capture;
  790. int err;
  791. const u32 channels = substream->runtime->channels;
  792. const u32 bytes_per_frame = channels * 3;
  793. const u32 period_size = substream->runtime->period_size;
  794. const u32 period_bytes = period_size * bytes_per_frame;
  795. const u32 pos = lx_stream->frame_pos;
  796. const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
  797. 0 : pos + 1;
  798. dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
  799. u32 buf_hi = 0;
  800. u32 buf_lo = 0;
  801. u32 buffer_index = 0;
  802. u32 needed, freed;
  803. u32 size_array[MAX_STREAM_BUFFER];
  804. dev_dbg(chip->card->dev, "->lx_interrupt_request_new_buffer\n");
  805. mutex_lock(&chip->lock);
  806. err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
  807. dev_dbg(chip->card->dev,
  808. "interrupt: needed %d, freed %d\n", needed, freed);
  809. unpack_pointer(buf, &buf_lo, &buf_hi);
  810. err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
  811. &buffer_index);
  812. dev_dbg(chip->card->dev,
  813. "interrupt: gave buffer index %x on 0x%lx (%d bytes)\n",
  814. buffer_index, (unsigned long)buf, period_bytes);
  815. lx_stream->frame_pos = next_pos;
  816. mutex_unlock(&chip->lock);
  817. return err;
  818. }
  819. irqreturn_t lx_interrupt(int irq, void *dev_id)
  820. {
  821. struct lx6464es *chip = dev_id;
  822. int async_pending, async_escmd;
  823. u32 irqsrc;
  824. bool wake_thread = false;
  825. dev_dbg(chip->card->dev,
  826. "**************************************************\n");
  827. if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
  828. dev_dbg(chip->card->dev, "IRQ_NONE\n");
  829. return IRQ_NONE; /* this device did not cause the interrupt */
  830. }
  831. if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
  832. return IRQ_HANDLED;
  833. if (irqsrc & MASK_SYS_STATUS_EOBI)
  834. dev_dbg(chip->card->dev, "interrupt: EOBI\n");
  835. if (irqsrc & MASK_SYS_STATUS_EOBO)
  836. dev_dbg(chip->card->dev, "interrupt: EOBO\n");
  837. if (irqsrc & MASK_SYS_STATUS_URUN)
  838. dev_dbg(chip->card->dev, "interrupt: URUN\n");
  839. if (irqsrc & MASK_SYS_STATUS_ORUN)
  840. dev_dbg(chip->card->dev, "interrupt: ORUN\n");
  841. if (async_pending) {
  842. wake_thread = true;
  843. chip->irqsrc = irqsrc;
  844. }
  845. if (async_escmd) {
  846. /* backdoor for ethersound commands
  847. *
  848. * for now, we do not need this
  849. *
  850. * */
  851. dev_dbg(chip->card->dev, "interrupt requests escmd handling\n");
  852. }
  853. return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
  854. }
  855. irqreturn_t lx_threaded_irq(int irq, void *dev_id)
  856. {
  857. struct lx6464es *chip = dev_id;
  858. u64 notified_in_pipe_mask = 0;
  859. u64 notified_out_pipe_mask = 0;
  860. int freq_changed;
  861. int err;
  862. /* handle async events */
  863. err = lx_interrupt_handle_async_events(chip, chip->irqsrc,
  864. &freq_changed,
  865. &notified_in_pipe_mask,
  866. &notified_out_pipe_mask);
  867. if (err)
  868. dev_err(chip->card->dev, "error handling async events\n");
  869. if (notified_in_pipe_mask) {
  870. struct lx_stream *lx_stream = &chip->capture_stream;
  871. dev_dbg(chip->card->dev,
  872. "requesting audio transfer for capture\n");
  873. err = lx_interrupt_request_new_buffer(chip, lx_stream);
  874. if (err < 0)
  875. dev_err(chip->card->dev,
  876. "cannot request new buffer for capture\n");
  877. snd_pcm_period_elapsed(lx_stream->stream);
  878. }
  879. if (notified_out_pipe_mask) {
  880. struct lx_stream *lx_stream = &chip->playback_stream;
  881. dev_dbg(chip->card->dev,
  882. "requesting audio transfer for playback\n");
  883. err = lx_interrupt_request_new_buffer(chip, lx_stream);
  884. if (err < 0)
  885. dev_err(chip->card->dev,
  886. "cannot request new buffer for playback\n");
  887. snd_pcm_period_elapsed(lx_stream->stream);
  888. }
  889. return IRQ_HANDLED;
  890. }
  891. static void lx_irq_set(struct lx6464es *chip, int enable)
  892. {
  893. u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);
  894. /* enable/disable interrupts
  895. *
  896. * Set the Doorbell and PCI interrupt enable bits
  897. *
  898. * */
  899. if (enable)
  900. reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
  901. else
  902. reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
  903. lx_plx_reg_write(chip, ePLX_IRQCS, reg);
  904. }
  905. void lx_irq_enable(struct lx6464es *chip)
  906. {
  907. dev_dbg(chip->card->dev, "->lx_irq_enable\n");
  908. lx_irq_set(chip, 1);
  909. }
  910. void lx_irq_disable(struct lx6464es *chip)
  911. {
  912. dev_dbg(chip->card->dev, "->lx_irq_disable\n");
  913. lx_irq_set(chip, 0);
  914. }