amdtp-stream.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
  4. * with Common Isochronous Packet (IEC 61883-1) headers
  5. *
  6. * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
  7. */
  8. #include <linux/device.h>
  9. #include <linux/err.h>
  10. #include <linux/firewire.h>
  11. #include <linux/firewire-constants.h>
  12. #include <linux/module.h>
  13. #include <linux/slab.h>
  14. #include <sound/pcm.h>
  15. #include <sound/pcm_params.h>
  16. #include "amdtp-stream.h"
  17. #define TICKS_PER_CYCLE 3072
  18. #define CYCLES_PER_SECOND 8000
  19. #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
  20. #define OHCI_SECOND_MODULUS 8
  21. /* Always support Linux tracing subsystem. */
  22. #define CREATE_TRACE_POINTS
  23. #include "amdtp-stream-trace.h"
  24. #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
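// For reference: 0x2e00 = 11776 ticks; with 3072 ticks per 125 usec cycle this is
// 11776 / 24576000 s = 479.17 usec, i.e. slightly less than four isochronous cycles.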
  25. /* isochronous header parameters */
  26. #define ISO_DATA_LENGTH_SHIFT 16
  27. #define TAG_NO_CIP_HEADER 0
  28. #define TAG_CIP 1
29. // Common Isochronous Packet (CIP) header parameters. Use a two-quadlet CIP header when supported.
  30. #define CIP_HEADER_QUADLETS 2
  31. #define CIP_EOH_SHIFT 31
  32. #define CIP_EOH (1u << CIP_EOH_SHIFT)
  33. #define CIP_EOH_MASK 0x80000000
  34. #define CIP_SID_SHIFT 24
  35. #define CIP_SID_MASK 0x3f000000
  36. #define CIP_DBS_MASK 0x00ff0000
  37. #define CIP_DBS_SHIFT 16
  38. #define CIP_SPH_MASK 0x00000400
  39. #define CIP_SPH_SHIFT 10
  40. #define CIP_DBC_MASK 0x000000ff
  41. #define CIP_FMT_SHIFT 24
  42. #define CIP_FMT_MASK 0x3f000000
  43. #define CIP_FDF_MASK 0x00ff0000
  44. #define CIP_FDF_SHIFT 16
  45. #define CIP_FDF_NO_DATA 0xff
  46. #define CIP_SYT_MASK 0x0000ffff
  47. #define CIP_SYT_NO_INFO 0xffff
  48. #define CIP_SYT_CYCLE_MODULUS 16
  49. #define CIP_NO_DATA ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
  50. #define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS)
  51. /* Audio and Music transfer protocol specific parameters */
  52. #define CIP_FMT_AM 0x10
  53. #define AMDTP_FDF_NO_DATA 0xff
  54. // For iso header and tstamp.
  55. #define IR_CTX_HEADER_DEFAULT_QUADLETS 2
  56. // Add nothing.
  57. #define IR_CTX_HEADER_SIZE_NO_CIP (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
  58. // Add two quadlets CIP header.
  59. #define IR_CTX_HEADER_SIZE_CIP (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
  60. #define HEADER_TSTAMP_MASK 0x0000ffff
  61. #define IT_PKT_HEADER_SIZE_CIP CIP_HEADER_SIZE
  62. #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
63. // The initial firmware of OXFW970 can postpone packet transmission while finishing an
64. // asynchronous transaction. This module accepts up to 5 skipped cycles to avoid buffer
65. // overrun. If the actual device skips more cycles, this module stops the packet streaming.
  66. #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
  67. static void pcm_period_work(struct work_struct *work);
  68. /**
  69. * amdtp_stream_init - initialize an AMDTP stream structure
  70. * @s: the AMDTP stream to initialize
  71. * @unit: the target of the stream
  72. * @dir: the direction of stream
73. * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants.
  74. * @fmt: the value of fmt field in CIP header
  75. * @process_ctx_payloads: callback handler to process payloads of isoc context
  76. * @protocol_size: the size to allocate newly for protocol
  77. */
  78. int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
  79. enum amdtp_stream_direction dir, unsigned int flags,
  80. unsigned int fmt,
  81. amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
  82. unsigned int protocol_size)
  83. {
  84. if (process_ctx_payloads == NULL)
  85. return -EINVAL;
  86. s->protocol = kzalloc(protocol_size, GFP_KERNEL);
  87. if (!s->protocol)
  88. return -ENOMEM;
  89. s->unit = unit;
  90. s->direction = dir;
  91. s->flags = flags;
  92. s->context = ERR_PTR(-1);
  93. mutex_init(&s->mutex);
  94. INIT_WORK(&s->period_work, pcm_period_work);
  95. s->packet_index = 0;
  96. init_waitqueue_head(&s->ready_wait);
  97. s->fmt = fmt;
  98. s->process_ctx_payloads = process_ctx_payloads;
  99. return 0;
  100. }
  101. EXPORT_SYMBOL(amdtp_stream_init);
  102. /**
  103. * amdtp_stream_destroy - free stream resources
  104. * @s: the AMDTP stream to destroy
  105. */
  106. void amdtp_stream_destroy(struct amdtp_stream *s)
  107. {
  108. /* Not initialized. */
  109. if (s->protocol == NULL)
  110. return;
  111. WARN_ON(amdtp_stream_running(s));
  112. kfree(s->protocol);
  113. mutex_destroy(&s->mutex);
  114. }
  115. EXPORT_SYMBOL(amdtp_stream_destroy);
  116. const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
  117. [CIP_SFC_32000] = 8,
  118. [CIP_SFC_44100] = 8,
  119. [CIP_SFC_48000] = 8,
  120. [CIP_SFC_88200] = 16,
  121. [CIP_SFC_96000] = 16,
  122. [CIP_SFC_176400] = 32,
  123. [CIP_SFC_192000] = 32,
  124. };
  125. EXPORT_SYMBOL(amdtp_syt_intervals);
  126. const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
  127. [CIP_SFC_32000] = 32000,
  128. [CIP_SFC_44100] = 44100,
  129. [CIP_SFC_48000] = 48000,
  130. [CIP_SFC_88200] = 88200,
  131. [CIP_SFC_96000] = 96000,
  132. [CIP_SFC_176400] = 176400,
  133. [CIP_SFC_192000] = 192000,
  134. };
  135. EXPORT_SYMBOL(amdtp_rate_table);
  136. static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
  137. struct snd_pcm_hw_rule *rule)
  138. {
  139. struct snd_interval *s = hw_param_interval(params, rule->var);
  140. const struct snd_interval *r =
  141. hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
  142. struct snd_interval t = {0};
  143. unsigned int step = 0;
  144. int i;
  145. for (i = 0; i < CIP_SFC_COUNT; ++i) {
  146. if (snd_interval_test(r, amdtp_rate_table[i]))
  147. step = max(step, amdtp_syt_intervals[i]);
  148. }
  149. if (step == 0)
  150. return -EINVAL;
  151. t.min = roundup(s->min, step);
  152. t.max = rounddown(s->max, step);
  153. t.integer = 1;
  154. return snd_interval_refine(s, &t);
  155. }
  156. /**
  157. * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
  158. * @s: the AMDTP stream, which must be initialized.
  159. * @runtime: the PCM substream runtime
  160. */
  161. int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
  162. struct snd_pcm_runtime *runtime)
  163. {
  164. struct snd_pcm_hardware *hw = &runtime->hw;
  165. unsigned int ctx_header_size;
  166. unsigned int maximum_usec_per_period;
  167. int err;
  168. hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
  169. SNDRV_PCM_INFO_INTERLEAVED |
  170. SNDRV_PCM_INFO_JOINT_DUPLEX |
  171. SNDRV_PCM_INFO_MMAP |
  172. SNDRV_PCM_INFO_MMAP_VALID |
  173. SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
  174. hw->periods_min = 2;
  175. hw->periods_max = UINT_MAX;
  176. /* bytes for a frame */
  177. hw->period_bytes_min = 4 * hw->channels_max;
178. /* Just to prevent from allocating too many pages. */
  179. hw->period_bytes_max = hw->period_bytes_min * 2048;
  180. hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
181. // The Linux driver for the 1394 OHCI controller voluntarily flushes an isoc
182. // context when the total size of accumulated context headers reaches
183. // PAGE_SIZE. This kicks the work for the isoc context and invokes the
184. // callback in the middle of the scheduled interrupts.
185. // Although AMDTP streams in the same domain use the same number of events per
186. // IRQ, use the larger of the context header sizes of the IT/IR contexts.
187. // Here, the context header size of the IR context is used for both
188. // contexts.
  189. if (!(s->flags & CIP_NO_HEADER))
  190. ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
  191. else
  192. ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
  193. maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
  194. CYCLES_PER_SECOND / ctx_header_size;
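// For example, with a 4 KiB PAGE_SIZE and the 16-byte IR context header of the CIP case,
// this yields USEC_PER_SEC * 4096 / 8000 / 16 = 32000 usec, i.e. a period of at most 32 ms.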
  195. // In IEC 61883-6, one isoc packet can transfer events up to the value
  196. // of syt interval. This comes from the interval of isoc cycle. As 1394
  197. // OHCI controller can generate hardware IRQ per isoc packet, the
  198. // interval is 125 usec.
  199. // However, there are two ways of transmission in IEC 61883-6; blocking
  200. // and non-blocking modes. In blocking mode, the sequence of isoc packet
  201. // includes 'empty' or 'NODATA' packets which include no event. In
  202. // non-blocking mode, the number of events per packet is variable up to
  203. // the syt interval.
204. // Due to the above protocol design, the minimum number of PCM frames per
205. // interrupt should be double the syt interval, which corresponds to
206. // 250 usec.
  207. err = snd_pcm_hw_constraint_minmax(runtime,
  208. SNDRV_PCM_HW_PARAM_PERIOD_TIME,
  209. 250, maximum_usec_per_period);
  210. if (err < 0)
  211. goto end;
  212. /* Non-Blocking stream has no more constraints */
  213. if (!(s->flags & CIP_BLOCKING))
  214. goto end;
  215. /*
  216. * One AMDTP packet can include some frames. In blocking mode, the
  217. * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
  218. * depending on its sampling rate. For accurate period interrupt, it's
219. preferable to align period/buffer sizes to the current SYT_INTERVAL.
  220. */
  221. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
  222. apply_constraint_to_size, NULL,
  223. SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
  224. SNDRV_PCM_HW_PARAM_RATE, -1);
  225. if (err < 0)
  226. goto end;
  227. err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
  228. apply_constraint_to_size, NULL,
  229. SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
  230. SNDRV_PCM_HW_PARAM_RATE, -1);
  231. if (err < 0)
  232. goto end;
  233. end:
  234. return err;
  235. }
  236. EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
  237. /**
  238. * amdtp_stream_set_parameters - set stream parameters
  239. * @s: the AMDTP stream to configure
  240. * @rate: the sample rate
  241. * @data_block_quadlets: the size of a data block in quadlet unit
  242. * @pcm_frame_multiplier: the multiplier to compute the number of PCM frames by the number of AMDTP
  243. * events.
  244. *
  245. * The parameters must be set before the stream is started, and must not be
  246. * changed while the stream is running.
  247. */
  248. int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
  249. unsigned int data_block_quadlets, unsigned int pcm_frame_multiplier)
  250. {
  251. unsigned int sfc;
  252. for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
  253. if (amdtp_rate_table[sfc] == rate)
  254. break;
  255. }
  256. if (sfc == ARRAY_SIZE(amdtp_rate_table))
  257. return -EINVAL;
  258. s->sfc = sfc;
  259. s->data_block_quadlets = data_block_quadlets;
  260. s->syt_interval = amdtp_syt_intervals[sfc];
  261. // default buffering in the device.
  262. s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
  263. // additional buffering needed to adjust for no-data packets.
  264. if (s->flags & CIP_BLOCKING)
  265. s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
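// For example, at 48.0 kHz in blocking mode this adds 24576000 * 8 / 48000 = 4096 ticks
// (one and a third isochronous cycles) on top of the default transfer delay.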
  266. s->pcm_frame_multiplier = pcm_frame_multiplier;
  267. return 0;
  268. }
  269. EXPORT_SYMBOL(amdtp_stream_set_parameters);
  270. // The CIP header is processed in context header apart from context payload.
  271. static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
  272. {
  273. unsigned int multiplier;
  274. if (s->flags & CIP_JUMBO_PAYLOAD)
  275. multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
  276. else
  277. multiplier = 1;
  278. return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
  279. }
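// As an illustration of the above: a stream at 48.0 kHz (syt_interval = 8) with 10 quadlets
// per data block and no CIP_JUMBO_PAYLOAD has a maximum context payload of
// 8 * 10 * 4 = 320 bytes per packet; the figure of 10 quadlets is only illustrative.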
  280. /**
  281. * amdtp_stream_get_max_payload - get the stream's packet size
  282. * @s: the AMDTP stream
  283. *
  284. * This function must not be called before the stream has been configured
  285. * with amdtp_stream_set_parameters().
  286. */
  287. unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
  288. {
  289. unsigned int cip_header_size;
  290. if (!(s->flags & CIP_NO_HEADER))
  291. cip_header_size = CIP_HEADER_SIZE;
  292. else
  293. cip_header_size = 0;
  294. return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
  295. }
  296. EXPORT_SYMBOL(amdtp_stream_get_max_payload);
  297. /**
  298. * amdtp_stream_pcm_prepare - prepare PCM device for running
  299. * @s: the AMDTP stream
  300. *
  301. * This function should be called from the PCM device's .prepare callback.
  302. */
  303. void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
  304. {
  305. cancel_work_sync(&s->period_work);
  306. s->pcm_buffer_pointer = 0;
  307. s->pcm_period_pointer = 0;
  308. }
  309. EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
  310. #define prev_packet_desc(s, desc) \
  311. list_prev_entry_circular(desc, &s->packet_descs_list, link)
  312. static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
  313. unsigned int size, unsigned int pos, unsigned int count)
  314. {
  315. const unsigned int syt_interval = s->syt_interval;
  316. int i;
  317. for (i = 0; i < count; ++i) {
  318. struct seq_desc *desc = descs + pos;
  319. if (desc->syt_offset != CIP_SYT_NO_INFO)
  320. desc->data_blocks = syt_interval;
  321. else
  322. desc->data_blocks = 0;
  323. pos = (pos + 1) % size;
  324. }
  325. }
  326. static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
  327. unsigned int size, unsigned int pos,
  328. unsigned int count)
  329. {
  330. const enum cip_sfc sfc = s->sfc;
  331. unsigned int state = s->ctx_data.rx.data_block_state;
  332. int i;
  333. for (i = 0; i < count; ++i) {
  334. struct seq_desc *desc = descs + pos;
  335. if (!cip_sfc_is_base_44100(sfc)) {
  336. // Sample_rate / 8000 is an integer, and precomputed.
  337. desc->data_blocks = state;
  338. } else {
  339. unsigned int phase = state;
  340. /*
  341. * This calculates the number of data blocks per packet so that
  342. * 1) the overall rate is correct and exactly synchronized to
  343. * the bus clock, and
  344. * 2) packets with a rounded-up number of blocks occur as early
  345. * as possible in the sequence (to prevent underruns of the
  346. * device's buffer).
  347. */
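/*
 * For example, at 44.1 kHz the 80-entry sequence contains 41 packets with 6 events and
 * 39 with 5, i.e. 41 * 6 + 39 * 5 = 441 events per 80 cycles (10 ms), exactly 44100 events/s.
 */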
  348. if (sfc == CIP_SFC_44100)
  349. /* 6 6 5 6 5 6 5 ... */
  350. desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
  351. else
  352. /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
  353. desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
  354. if (++phase >= (80 >> (sfc >> 1)))
  355. phase = 0;
  356. state = phase;
  357. }
  358. pos = (pos + 1) % size;
  359. }
  360. s->ctx_data.rx.data_block_state = state;
  361. }
  362. static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
  363. unsigned int *syt_offset_state, enum cip_sfc sfc)
  364. {
  365. unsigned int syt_offset;
  366. if (*last_syt_offset < TICKS_PER_CYCLE) {
  367. if (!cip_sfc_is_base_44100(sfc))
  368. syt_offset = *last_syt_offset + *syt_offset_state;
  369. else {
  370. /*
  371. * The time, in ticks, of the n'th SYT_INTERVAL sample is:
  372. * n * SYT_INTERVAL * 24576000 / sample_rate
  373. * Modulo TICKS_PER_CYCLE, the difference between successive
  374. * elements is about 1386.23. Rounding the results of this
  375. * formula to the SYT precision results in a sequence of
  376. * differences that begins with:
  377. * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
  378. * This code generates _exactly_ the same sequence.
  379. */
  380. unsigned int phase = *syt_offset_state;
  381. unsigned int index = phase % 13;
  382. syt_offset = *last_syt_offset;
  383. syt_offset += 1386 + ((index && !(index & 3)) ||
  384. phase == 146);
  385. if (++phase >= 147)
  386. phase = 0;
  387. *syt_offset_state = phase;
  388. }
  389. } else
  390. syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
  391. *last_syt_offset = syt_offset;
  392. if (syt_offset >= TICKS_PER_CYCLE)
  393. syt_offset = CIP_SYT_NO_INFO;
  394. return syt_offset;
  395. }
  396. static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
  397. unsigned int size, unsigned int pos, unsigned int count)
  398. {
  399. const enum cip_sfc sfc = s->sfc;
  400. unsigned int last = s->ctx_data.rx.last_syt_offset;
  401. unsigned int state = s->ctx_data.rx.syt_offset_state;
  402. int i;
  403. for (i = 0; i < count; ++i) {
  404. struct seq_desc *desc = descs + pos;
  405. desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
  406. pos = (pos + 1) % size;
  407. }
  408. s->ctx_data.rx.last_syt_offset = last;
  409. s->ctx_data.rx.syt_offset_state = state;
  410. }
  411. static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
  412. unsigned int transfer_delay)
  413. {
  414. unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
  415. unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
  416. unsigned int syt_offset;
  417. // Round up.
  418. if (syt_cycle_lo < cycle_lo)
  419. syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
  420. syt_cycle_lo -= cycle_lo;
  421. // Subtract transfer delay so that the synchronization offset is not so large
  422. // at transmission.
  423. syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
  424. if (syt_offset < transfer_delay)
  425. syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
  426. return syt_offset - transfer_delay;
  427. }
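// For example, a received syt of 0x2123 in a cycle whose low four bits are 0x0 points two
// cycles ahead plus 0x123 ticks: 2 * 3072 + 291 = 6435 ticks, from which the transfer delay
// is subtracted (adding 16 cycles first if the result would otherwise go negative).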
428. // Both the producer and the consumer of the queue run on the same clock of the IEEE 1394 bus.
429. // Additionally, the sequence of tx packets is strictly checked for any discontinuity before
430. // filling entries in the queue. The calculation is therefore safe from overrun even though it
431. // looks fragile.
  432. static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
  433. {
  434. const unsigned int cache_size = s->ctx_data.tx.cache.size;
  435. unsigned int cycles = s->ctx_data.tx.cache.pos;
  436. if (cycles < head)
  437. cycles += cache_size;
  438. cycles -= head;
  439. return cycles;
  440. }
  441. static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
  442. {
  443. const unsigned int transfer_delay = s->transfer_delay;
  444. const unsigned int cache_size = s->ctx_data.tx.cache.size;
  445. struct seq_desc *cache = s->ctx_data.tx.cache.descs;
  446. unsigned int cache_pos = s->ctx_data.tx.cache.pos;
  447. bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
  448. int i;
  449. for (i = 0; i < desc_count; ++i) {
  450. struct seq_desc *dst = cache + cache_pos;
  451. if (aware_syt && src->syt != CIP_SYT_NO_INFO)
  452. dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
  453. else
  454. dst->syt_offset = CIP_SYT_NO_INFO;
  455. dst->data_blocks = src->data_blocks;
  456. cache_pos = (cache_pos + 1) % cache_size;
  457. src = amdtp_stream_next_packet_desc(s, src);
  458. }
  459. s->ctx_data.tx.cache.pos = cache_pos;
  460. }
  461. static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  462. unsigned int pos, unsigned int count)
  463. {
  464. pool_ideal_syt_offsets(s, descs, size, pos, count);
  465. if (s->flags & CIP_BLOCKING)
  466. pool_blocking_data_blocks(s, descs, size, pos, count);
  467. else
  468. pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
  469. }
  470. static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  471. unsigned int pos, unsigned int count)
  472. {
  473. struct amdtp_stream *target = s->ctx_data.rx.replay_target;
  474. const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
  475. const unsigned int cache_size = target->ctx_data.tx.cache.size;
  476. unsigned int cache_pos = s->ctx_data.rx.cache_pos;
  477. int i;
  478. for (i = 0; i < count; ++i) {
  479. descs[pos] = cache[cache_pos];
  480. cache_pos = (cache_pos + 1) % cache_size;
  481. pos = (pos + 1) % size;
  482. }
  483. s->ctx_data.rx.cache_pos = cache_pos;
  484. }
  485. static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  486. unsigned int pos, unsigned int count)
  487. {
  488. struct amdtp_domain *d = s->domain;
  489. void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
  490. unsigned int pos, unsigned int count);
  491. if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
  492. pool_seq_descs = pool_ideal_seq_descs;
  493. } else {
  494. if (!d->replay.on_the_fly) {
  495. pool_seq_descs = pool_replayed_seq;
  496. } else {
  497. struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
  498. const unsigned int cache_size = tx->ctx_data.tx.cache.size;
  499. const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
  500. unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);
  501. if (cached_cycles > count && cached_cycles > cache_size / 2)
  502. pool_seq_descs = pool_replayed_seq;
  503. else
  504. pool_seq_descs = pool_ideal_seq_descs;
  505. }
  506. }
  507. pool_seq_descs(s, descs, size, pos, count);
  508. }
  509. static void update_pcm_pointers(struct amdtp_stream *s,
  510. struct snd_pcm_substream *pcm,
  511. unsigned int frames)
  512. {
  513. unsigned int ptr;
  514. ptr = s->pcm_buffer_pointer + frames;
  515. if (ptr >= pcm->runtime->buffer_size)
  516. ptr -= pcm->runtime->buffer_size;
  517. WRITE_ONCE(s->pcm_buffer_pointer, ptr);
  518. s->pcm_period_pointer += frames;
  519. if (s->pcm_period_pointer >= pcm->runtime->period_size) {
  520. s->pcm_period_pointer -= pcm->runtime->period_size;
521. // The program in the user process should periodically check the status of the intermediate
522. // buffer associated with the PCM substream to process PCM frames in the buffer, instead of
523. // receiving a notification of period elapsed by poll wait.
  524. //
  525. // Use another work item for period elapsed event to prevent the following AB/BA
  526. // deadlock:
  527. //
  528. // thread 1 thread 2
  529. // ================================= =================================
  530. // A.work item (process) pcm ioctl (process)
  531. // v v
  532. // process_rx_packets() B.PCM stream lock
  533. // process_tx_packets() v
  534. // v callbacks in snd_pcm_ops
  535. // update_pcm_pointers() v
  536. // snd_pcm_elapsed() fw_iso_context_flush_completions()
  537. // snd_pcm_stream_lock_irqsave() disable_work_sync()
  538. // v v
  539. // wait until release of B wait until A exits
  540. if (!pcm->runtime->no_period_wakeup)
  541. queue_work(system_highpri_wq, &s->period_work);
  542. }
  543. }
  544. static void pcm_period_work(struct work_struct *work)
  545. {
  546. struct amdtp_stream *s = container_of(work, struct amdtp_stream,
  547. period_work);
  548. struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
  549. if (pcm)
  550. snd_pcm_period_elapsed(pcm);
  551. }
  552. static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
  553. bool sched_irq)
  554. {
  555. int err;
  556. params->interrupt = sched_irq;
  557. params->tag = s->tag;
  558. params->sy = 0;
  559. err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
  560. s->buffer.packets[s->packet_index].offset);
  561. if (err < 0) {
  562. dev_err(&s->unit->device, "queueing error: %d\n", err);
  563. goto end;
  564. }
  565. if (++s->packet_index >= s->queue_size)
  566. s->packet_index = 0;
  567. end:
  568. return err;
  569. }
  570. static inline int queue_out_packet(struct amdtp_stream *s,
  571. struct fw_iso_packet *params, bool sched_irq)
  572. {
  573. params->skip =
  574. !!(params->header_length == 0 && params->payload_length == 0);
  575. return queue_packet(s, params, sched_irq);
  576. }
  577. static inline int queue_in_packet(struct amdtp_stream *s,
  578. struct fw_iso_packet *params)
  579. {
  580. // Queue one packet for IR context.
  581. params->header_length = s->ctx_data.tx.ctx_header_size;
  582. params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
  583. params->skip = false;
  584. return queue_packet(s, params, false);
  585. }
  586. static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
  587. unsigned int data_block_counter, unsigned int syt)
  588. {
  589. cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
  590. (s->data_block_quadlets << CIP_DBS_SHIFT) |
  591. ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
  592. data_block_counter);
  593. cip_header[1] = cpu_to_be32(CIP_EOH |
  594. ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
  595. ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
  596. (syt & CIP_SYT_MASK));
  597. }
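// The two generated quadlets follow the bit layout defined by the CIP_* masks above:
//   quadlet 0: SID (source node ID) | DBS | SPH | DBC (EOH bit clear)
//   quadlet 1: EOH | FMT | FDF | SYT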
  598. static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
  599. struct fw_iso_packet *params, unsigned int header_length,
  600. unsigned int data_blocks,
  601. unsigned int data_block_counter,
  602. unsigned int syt, unsigned int index, u32 curr_cycle_time)
  603. {
  604. unsigned int payload_length;
  605. __be32 *cip_header;
  606. payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
  607. params->payload_length = payload_length;
  608. if (header_length > 0) {
  609. cip_header = (__be32 *)params->header;
  610. generate_cip_header(s, cip_header, data_block_counter, syt);
  611. params->header_length = header_length;
  612. } else {
  613. cip_header = NULL;
  614. }
  615. trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
  616. data_block_counter, s->packet_index, index, curr_cycle_time);
  617. }
  618. static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
  619. unsigned int payload_length,
  620. unsigned int *data_blocks,
  621. unsigned int *data_block_counter, unsigned int *syt)
  622. {
  623. u32 cip_header[2];
  624. unsigned int sph;
  625. unsigned int fmt;
  626. unsigned int fdf;
  627. unsigned int dbc;
  628. bool lost;
  629. cip_header[0] = be32_to_cpu(buf[0]);
  630. cip_header[1] = be32_to_cpu(buf[1]);
  631. /*
  632. * This module supports 'Two-quadlet CIP header with SYT field'.
  633. * For convenience, also check FMT field is AM824 or not.
  634. */
  635. if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
  636. ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
  637. (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
  638. dev_info_ratelimited(&s->unit->device,
  639. "Invalid CIP header for AMDTP: %08X:%08X\n",
  640. cip_header[0], cip_header[1]);
  641. return -EAGAIN;
  642. }
643. /* Check whether the protocol is valid or not. */
  644. sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
  645. fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
  646. if (sph != s->sph || fmt != s->fmt) {
  647. dev_info_ratelimited(&s->unit->device,
  648. "Detect unexpected protocol: %08x %08x\n",
  649. cip_header[0], cip_header[1]);
  650. return -EAGAIN;
  651. }
  652. /* Calculate data blocks */
  653. fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
  654. if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
  655. *data_blocks = 0;
  656. } else {
  657. unsigned int data_block_quadlets =
  658. (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
  659. /* avoid division by zero */
  660. if (data_block_quadlets == 0) {
  661. dev_err(&s->unit->device,
  662. "Detect invalid value in dbs field: %08X\n",
  663. cip_header[0]);
  664. return -EPROTO;
  665. }
  666. if (s->flags & CIP_WRONG_DBS)
  667. data_block_quadlets = s->data_block_quadlets;
  668. *data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
  669. }
  670. /* Check data block counter continuity */
  671. dbc = cip_header[0] & CIP_DBC_MASK;
  672. if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
  673. *data_block_counter != UINT_MAX)
  674. dbc = *data_block_counter;
  675. if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
  676. *data_block_counter == UINT_MAX) {
  677. lost = false;
  678. } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
  679. lost = dbc != *data_block_counter;
  680. } else {
  681. unsigned int dbc_interval;
  682. if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
  683. if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
  684. dbc_interval = s->ctx_data.tx.dbc_interval;
  685. else
  686. dbc_interval = *data_blocks;
  687. } else {
  688. dbc_interval = payload_length / sizeof(__be32);
  689. }
  690. lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
  691. }
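// For example, with CIP_DBC_IS_END_EVENT and 8 data blocks per packet, a previous counter of
// 0xfe is followed by an expected dbc of (0xfe + 8) & 0xff = 0x06.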
  692. if (lost) {
  693. dev_err(&s->unit->device,
  694. "Detect discontinuity of CIP: %02X %02X\n",
  695. *data_block_counter, dbc);
  696. return -EIO;
  697. }
  698. *data_block_counter = dbc;
  699. if (!(s->flags & CIP_UNAWARE_SYT))
  700. *syt = cip_header[1] & CIP_SYT_MASK;
  701. return 0;
  702. }
  703. static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
  704. const __be32 *ctx_header,
  705. unsigned int *data_blocks,
  706. unsigned int *data_block_counter,
  707. unsigned int *syt, unsigned int packet_index, unsigned int index,
  708. u32 curr_cycle_time)
  709. {
  710. unsigned int payload_length;
  711. const __be32 *cip_header;
  712. unsigned int cip_header_size;
  713. payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
  714. if (!(s->flags & CIP_NO_HEADER))
  715. cip_header_size = CIP_HEADER_SIZE;
  716. else
  717. cip_header_size = 0;
  718. if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
  719. dev_err(&s->unit->device,
  720. "Detect jumbo payload: %04x %04x\n",
  721. payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
  722. return -EIO;
  723. }
  724. if (cip_header_size > 0) {
  725. if (payload_length >= cip_header_size) {
  726. int err;
  727. cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
  728. err = check_cip_header(s, cip_header, payload_length - cip_header_size,
  729. data_blocks, data_block_counter, syt);
  730. if (err < 0)
  731. return err;
  732. } else {
733. // Handle the cycle as if an empty packet arrived.
  734. cip_header = NULL;
  735. *data_blocks = 0;
  736. *syt = 0;
  737. }
  738. } else {
  739. cip_header = NULL;
  740. *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
  741. *syt = 0;
  742. if (*data_block_counter == UINT_MAX)
  743. *data_block_counter = 0;
  744. }
  745. trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
  746. *data_block_counter, packet_index, index, curr_cycle_time);
  747. return 0;
  748. }
749. // In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
750. // the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are used to represent it.
751. // Thus, via the Linux FireWire subsystem, we can get only the 3 bits for the second.
  752. static inline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
  753. {
  754. return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
  755. }
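// For example, tstamp 0x6014 has a second field of 3 and a cycle field of 0x14, giving
// 3 * 8000 + 20 = 24020 as the cycle count modulo 8 seconds.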
  756. static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
  757. {
  758. u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
  759. return compute_ohci_iso_ctx_cycle_count(tstamp);
  760. }
  761. static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
  762. {
  763. cycle += addend;
  764. if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
  765. cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
  766. return cycle;
  767. }
  768. static inline u32 decrement_ohci_cycle_count(u32 minuend, u32 subtrahend)
  769. {
  770. if (minuend < subtrahend)
  771. minuend += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
  772. return minuend - subtrahend;
  773. }
  774. static int compare_ohci_cycle_count(u32 lval, u32 rval)
  775. {
  776. if (lval == rval)
  777. return 0;
  778. else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
  779. return -1;
  780. else
  781. return 1;
  782. }
783. // Align to the actual cycle count for the packet which is going to be scheduled.
784. // This module keeps the same number of isochronous packets queued as the size of the queue,
785. // one per isochronous cycle, therefore it's OK to just increment the cycle by the size of the
786. // queue for the scheduled cycle.
  787. static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
  788. unsigned int queue_size)
  789. {
  790. u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
  791. return increment_ohci_cycle_count(cycle, queue_size);
  792. }
  793. static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
  794. const __be32 *ctx_header, unsigned int packet_count,
  795. unsigned int *desc_count)
  796. {
  797. unsigned int next_cycle = s->next_cycle;
  798. unsigned int dbc = s->data_block_counter;
  799. unsigned int packet_index = s->packet_index;
  800. unsigned int queue_size = s->queue_size;
  801. u32 curr_cycle_time = 0;
  802. int i;
  803. int err;
  804. if (trace_amdtp_packet_enabled())
  805. (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
  806. *desc_count = 0;
  807. for (i = 0; i < packet_count; ++i) {
  808. unsigned int cycle;
  809. bool lost;
  810. unsigned int data_blocks;
  811. unsigned int syt;
  812. cycle = compute_ohci_cycle_count(ctx_header[1]);
  813. lost = (next_cycle != cycle);
  814. if (lost) {
  815. if (s->flags & CIP_NO_HEADER) {
816. // Fireface devices skip transmission just for an isoc cycle corresponding
817. // to an empty packet.
  818. unsigned int prev_cycle = next_cycle;
  819. next_cycle = increment_ohci_cycle_count(next_cycle, 1);
  820. lost = (next_cycle != cycle);
  821. if (!lost) {
  822. // Prepare a description for the skipped cycle for
  823. // sequence replay.
  824. desc->cycle = prev_cycle;
  825. desc->syt = 0;
  826. desc->data_blocks = 0;
  827. desc->data_block_counter = dbc;
  828. desc->ctx_payload = NULL;
  829. desc = amdtp_stream_next_packet_desc(s, desc);
  830. ++(*desc_count);
  831. }
  832. } else if (s->flags & CIP_JUMBO_PAYLOAD) {
833. // OXFW970 skips transmission for several isoc cycles during an
834. // asynchronous transaction. Sequence replay is impossible for that
835. // reason.
  836. unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
  837. IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
  838. lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
  839. }
  840. if (lost) {
  841. dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
  842. next_cycle, cycle);
  843. return -EIO;
  844. }
  845. }
  846. err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
  847. packet_index, i, curr_cycle_time);
  848. if (err < 0)
  849. return err;
  850. desc->cycle = cycle;
  851. desc->syt = syt;
  852. desc->data_blocks = data_blocks;
  853. desc->data_block_counter = dbc;
  854. desc->ctx_payload = s->buffer.packets[packet_index].buffer;
  855. if (!(s->flags & CIP_DBC_IS_END_EVENT))
  856. dbc = (dbc + desc->data_blocks) & 0xff;
  857. next_cycle = increment_ohci_cycle_count(next_cycle, 1);
  858. desc = amdtp_stream_next_packet_desc(s, desc);
  859. ++(*desc_count);
  860. ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
  861. packet_index = (packet_index + 1) % queue_size;
  862. }
  863. s->next_cycle = next_cycle;
  864. s->data_block_counter = dbc;
  865. return 0;
  866. }
  867. static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
  868. unsigned int transfer_delay)
  869. {
  870. unsigned int syt;
  871. syt_offset += transfer_delay;
  872. syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
  873. (syt_offset % TICKS_PER_CYCLE);
  874. return syt & CIP_SYT_MASK;
  875. }
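// For example, syt_offset 1000 with the default transfer_delay of 8704 ticks in cycle 100
// becomes 9704 ticks, i.e. 3 cycles plus 488 ticks, so the SYT field is
// ((100 + 3) << 12 | 488) & 0xffff = 0x71e8.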
  876. static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
  877. const __be32 *ctx_header, unsigned int packet_count)
  878. {
  879. struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
  880. unsigned int seq_size = s->ctx_data.rx.seq.size;
  881. unsigned int seq_pos = s->ctx_data.rx.seq.pos;
  882. unsigned int dbc = s->data_block_counter;
  883. bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
  884. int i;
  885. pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);
  886. for (i = 0; i < packet_count; ++i) {
  887. unsigned int index = (s->packet_index + i) % s->queue_size;
  888. const struct seq_desc *seq = seq_descs + seq_pos;
  889. desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
  890. if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
  891. desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
  892. else
  893. desc->syt = CIP_SYT_NO_INFO;
  894. desc->data_blocks = seq->data_blocks;
  895. if (s->flags & CIP_DBC_IS_END_EVENT)
  896. dbc = (dbc + desc->data_blocks) & 0xff;
  897. desc->data_block_counter = dbc;
  898. if (!(s->flags & CIP_DBC_IS_END_EVENT))
  899. dbc = (dbc + desc->data_blocks) & 0xff;
  900. desc->ctx_payload = s->buffer.packets[index].buffer;
  901. seq_pos = (seq_pos + 1) % seq_size;
  902. desc = amdtp_stream_next_packet_desc(s, desc);
  903. ++ctx_header;
  904. }
  905. s->data_block_counter = dbc;
  906. s->ctx_data.rx.seq.pos = seq_pos;
  907. }
  908. static inline void cancel_stream(struct amdtp_stream *s)
  909. {
  910. struct work_struct *work = current_work();
  911. s->packet_index = -1;
912. // Detect whether this runs in the work item of any isochronous context. The work item for
913. // pcm_period_work() should be excluded, since a call to snd_pcm_period_elapsed() can be
914. // reached via snd_pcm_ops.pointer() while the PCM stream (group) lock is held, which causes
915. // a deadlock at snd_pcm_stop_xrun().
  916. if (work && work != &s->period_work)
  917. amdtp_stream_pcm_abort(s);
  918. WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
  919. }
  920. static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
  921. const struct pkt_desc *desc, unsigned int count)
  922. {
  923. unsigned int data_block_count = 0;
  924. u32 latest_cycle;
  925. u32 cycle_time;
  926. u32 curr_cycle;
  927. u32 cycle_gap;
  928. int i, err;
  929. if (count == 0)
  930. goto end;
  931. // Forward to the latest record.
  932. for (i = 0; i < count - 1; ++i)
  933. desc = amdtp_stream_next_packet_desc(s, desc);
  934. latest_cycle = desc->cycle;
  935. err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
  936. if (err < 0)
  937. goto end;
  938. // Compute cycle count with lower 3 bits of second field and cycle field like timestamp
  939. // format of 1394 OHCI isochronous context.
  940. curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);
  941. if (s->direction == AMDTP_IN_STREAM) {
942. // NOTE: The AMDTP packet descriptor should be for a past isochronous cycle since
943. // it corresponds to an arrived isochronous packet.
  944. if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0)
  945. goto end;
  946. cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);
947. // NOTE: estimate the delay from the recent history of arrived AMDTP packets. The estimated
948. // value is expected to correspond to a few packets (0-2) since the packet that arrived at
949. // the most recent isochronous cycle has already been processed.
  950. for (i = 0; i < cycle_gap; ++i) {
  951. desc = amdtp_stream_next_packet_desc(s, desc);
  952. data_block_count += desc->data_blocks;
  953. }
  954. } else {
955. // NOTE: The AMDTP packet descriptor should be for a future isochronous cycle
956. // since it was already scheduled.
  957. if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0)
  958. goto end;
  959. cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);
  960. // NOTE: use history of scheduled packets.
  961. for (i = 0; i < cycle_gap; ++i) {
  962. data_block_count += desc->data_blocks;
  963. desc = prev_packet_desc(s, desc);
  964. }
  965. }
  966. end:
  967. return data_block_count * s->pcm_frame_multiplier;
  968. }
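// The result is in PCM frames; the caller stores it in runtime->delay as extra latency in
// addition to the frames already accounted for in the PCM ring buffer.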
  969. static void process_ctx_payloads(struct amdtp_stream *s,
  970. const struct pkt_desc *desc,
  971. unsigned int count)
  972. {
  973. struct snd_pcm_substream *pcm;
  974. int i;
  975. pcm = READ_ONCE(s->pcm);
  976. s->process_ctx_payloads(s, desc, count, pcm);
  977. if (pcm) {
  978. unsigned int data_block_count = 0;
  979. pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);
  980. for (i = 0; i < count; ++i) {
  981. data_block_count += desc->data_blocks;
  982. desc = amdtp_stream_next_packet_desc(s, desc);
  983. }
  984. update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
  985. }
  986. }
  987. static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
  988. void *header, void *private_data)
  989. {
  990. struct amdtp_stream *s = private_data;
  991. const struct amdtp_domain *d = s->domain;
  992. const __be32 *ctx_header = header;
  993. const unsigned int events_per_period = d->events_per_period;
  994. unsigned int event_count = s->ctx_data.rx.event_count;
  995. struct pkt_desc *desc = s->packet_descs_cursor;
  996. unsigned int pkt_header_length;
  997. unsigned int packets;
  998. u32 curr_cycle_time;
  999. bool need_hw_irq;
  1000. int i;
  1001. if (s->packet_index < 0)
  1002. return;
  1003. // Calculate the number of packets in buffer and check XRUN.
  1004. packets = header_length / sizeof(*ctx_header);
  1005. generate_rx_packet_descs(s, desc, ctx_header, packets);
  1006. process_ctx_payloads(s, desc, packets);
  1007. if (!(s->flags & CIP_NO_HEADER))
  1008. pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
  1009. else
  1010. pkt_header_length = 0;
  1011. if (s == d->irq_target) {
1012. // In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by the tasks of
1013. // the user process operating the ALSA PCM character device via ioctl(2) requests, instead of
1014. // by the scheduled hardware IRQ of an IT context.
  1015. struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
  1016. need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
  1017. } else {
  1018. need_hw_irq = false;
  1019. }
  1020. if (trace_amdtp_packet_enabled())
  1021. (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
  1022. for (i = 0; i < packets; ++i) {
  1023. DEFINE_RAW_FLEX(struct fw_iso_packet, template, header, CIP_HEADER_QUADLETS);
  1024. bool sched_irq = false;
  1025. build_it_pkt_header(s, desc->cycle, template, pkt_header_length,
  1026. desc->data_blocks, desc->data_block_counter,
  1027. desc->syt, i, curr_cycle_time);
  1028. if (s == s->domain->irq_target) {
  1029. event_count += desc->data_blocks;
  1030. if (event_count >= events_per_period) {
  1031. event_count -= events_per_period;
  1032. sched_irq = need_hw_irq;
  1033. }
  1034. }
  1035. if (queue_out_packet(s, template, sched_irq) < 0) {
  1036. cancel_stream(s);
  1037. return;
  1038. }
  1039. desc = amdtp_stream_next_packet_desc(s, desc);
  1040. }
  1041. s->ctx_data.rx.event_count = event_count;
  1042. s->packet_descs_cursor = desc;
  1043. }
  1044. static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
  1045. void *header, void *private_data)
  1046. {
  1047. struct amdtp_stream *s = private_data;
  1048. struct amdtp_domain *d = s->domain;
  1049. const __be32 *ctx_header = header;
  1050. unsigned int packets;
  1051. unsigned int cycle;
  1052. int i;
  1053. if (s->packet_index < 0)
  1054. return;
  1055. packets = header_length / sizeof(*ctx_header);
  1056. cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
  1057. s->next_cycle = increment_ohci_cycle_count(cycle, 1);
  1058. for (i = 0; i < packets; ++i) {
  1059. struct fw_iso_packet params = {
  1060. .header_length = 0,
  1061. .payload_length = 0,
  1062. };
  1063. bool sched_irq = (s == d->irq_target && i == packets - 1);
  1064. if (queue_out_packet(s, &params, sched_irq) < 0) {
  1065. cancel_stream(s);
  1066. return;
  1067. }
  1068. }
  1069. }
  1070. static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
  1071. void *header, void *private_data);
  1072. static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
  1073. size_t header_length, void *header, void *private_data)
  1074. {
  1075. struct amdtp_stream *s = private_data;
  1076. struct amdtp_domain *d = s->domain;
  1077. __be32 *ctx_header = header;
  1078. const unsigned int queue_size = s->queue_size;
  1079. unsigned int packets;
  1080. unsigned int offset;
  1081. if (s->packet_index < 0)
  1082. return;
  1083. packets = header_length / sizeof(*ctx_header);
  1084. offset = 0;
  1085. while (offset < packets) {
  1086. unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
  1087. if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
  1088. break;
  1089. ++offset;
  1090. }
  1091. if (offset > 0) {
  1092. unsigned int length = sizeof(*ctx_header) * offset;
  1093. skip_rx_packets(context, tstamp, length, ctx_header, private_data);
  1094. if (amdtp_streaming_error(s))
  1095. return;
  1096. ctx_header += offset;
  1097. header_length -= length;
  1098. }
  1099. if (offset < packets) {
  1100. s->ready_processing = true;
  1101. wake_up(&s->ready_wait);
  1102. if (d->replay.enable)
  1103. s->ctx_data.rx.cache_pos = 0;
  1104. process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
  1105. if (amdtp_streaming_error(s))
  1106. return;
  1107. if (s == d->irq_target)
  1108. s->context->callback.sc = irq_target_callback;
  1109. else
  1110. s->context->callback.sc = process_rx_packets;
  1111. }
  1112. }
  1113. static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
  1114. void *header, void *private_data)
  1115. {
  1116. struct amdtp_stream *s = private_data;
  1117. __be32 *ctx_header = header;
  1118. struct pkt_desc *desc = s->packet_descs_cursor;
  1119. unsigned int packet_count;
  1120. unsigned int desc_count;
  1121. int i;
  1122. int err;
  1123. if (s->packet_index < 0)
  1124. return;
  1125. // Calculate the number of packets in buffer and check XRUN.
  1126. packet_count = header_length / s->ctx_data.tx.ctx_header_size;
  1127. desc_count = 0;
  1128. err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
  1129. if (err < 0) {
  1130. if (err != -EAGAIN) {
  1131. cancel_stream(s);
  1132. return;
  1133. }
  1134. } else {
  1135. struct amdtp_domain *d = s->domain;
  1136. process_ctx_payloads(s, desc, desc_count);
  1137. if (d->replay.enable)
  1138. cache_seq(s, desc, desc_count);
  1139. for (i = 0; i < desc_count; ++i)
  1140. desc = amdtp_stream_next_packet_desc(s, desc);
  1141. s->packet_descs_cursor = desc;
  1142. }
  1143. for (i = 0; i < packet_count; ++i) {
  1144. struct fw_iso_packet params = {0};
  1145. if (queue_in_packet(s, &params) < 0) {
  1146. cancel_stream(s);
  1147. return;
  1148. }
  1149. }
  1150. }
  1151. static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
  1152. void *header, void *private_data)
  1153. {
  1154. struct amdtp_stream *s = private_data;
  1155. const __be32 *ctx_header = header;
  1156. unsigned int packets;
  1157. unsigned int cycle;
  1158. int i;
  1159. if (s->packet_index < 0)
  1160. return;
  1161. packets = header_length / s->ctx_data.tx.ctx_header_size;
  1162. ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
  1163. cycle = compute_ohci_cycle_count(ctx_header[1]);
  1164. s->next_cycle = increment_ohci_cycle_count(cycle, 1);
  1165. for (i = 0; i < packets; ++i) {
  1166. struct fw_iso_packet params = {0};
  1167. if (queue_in_packet(s, &params) < 0) {
  1168. cancel_stream(s);
  1169. return;
  1170. }
  1171. }
  1172. }
static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}

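// Initial IR callback: drop packets while counting data blocks to detect when events actually
// start arriving, then agree on the cycle count from which every IR context in the domain
// begins processing packet content.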
static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
				      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int count;
	unsigned int events;
	int i;

	if (s->packet_index < 0)
		return;

	count = header_length / s->ctx_data.tx.ctx_header_size;

	// Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
	for (i = 0; i < count; ++i) {
		unsigned int payload_quads =
			(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
		unsigned int data_blocks;

		if (s->flags & CIP_NO_HEADER) {
			data_blocks = payload_quads / s->data_block_quadlets;
		} else {
			__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;

			if (payload_quads < CIP_HEADER_QUADLETS) {
				data_blocks = 0;
			} else {
				payload_quads -= CIP_HEADER_QUADLETS;

				if (s->flags & CIP_UNAWARE_SYT) {
					data_blocks = payload_quads / s->data_block_quadlets;
				} else {
					u32 cip1 = be32_to_cpu(cip_headers[1]);

					// A NODATA packet can include data blocks, but they
					// are not available as events.
					if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
						data_blocks = 0;
					else
						data_blocks = payload_quads / s->data_block_quadlets;
				}
			}
		}

		events += data_blocks;
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
	}

	drop_tx_packets(context, tstamp, header_length, header, s);

	if (events > 0)
		s->ctx_data.tx.event_starts = true;

	// Decide the cycle count at which to begin processing packet content in IR contexts.
	{
		unsigned int stream_count = 0;
		unsigned int event_starts_count = 0;
		unsigned int cycle = UINT_MAX;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}

		if (stream_count == event_starts_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}

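// From the IRQ target's callback, flush completions of the other running contexts in the
// domain, and cancel every stream once any of them has entered an error state.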
static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

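// The IRQ target's callbacks drive the whole domain: after handling their own IT packets they
// flush the other contexts so that all streams are processed at the pace of one hardware IRQ.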
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
					       size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

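// Skip IT packets on the IRQ target until the domain is ready to start. When sequence replay
// is enabled without on-the-fly mode, wait until each replay target has cached enough of its
// sequence, then pick the starting cycle and switch every IT context to its intermediate
// callback.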
static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	bool ready_to_start;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	if (d->replay.enable && !d->replay.on_the_fly) {
		unsigned int rx_count = 0;
		unsigned int rx_ready_count = 0;
		struct amdtp_stream *rx;

		list_for_each_entry(rx, &d->streams, list) {
			struct amdtp_stream *tx;
			unsigned int cached_cycles;

			if (rx->direction != AMDTP_OUT_STREAM)
				continue;
			++rx_count;

			tx = rx->ctx_data.rx.replay_target;
			cached_cycles = calculate_cached_cycle_count(tx, 0);
			if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
				++rx_ready_count;
		}

		ready_to_start = (rx_count == rx_ready_count);
	} else {
		ready_to_start = true;
	}

	// Decide the cycle count at which to begin processing packet content in IT contexts. All
	// IT contexts are expected to have started and received a callback by this point.
	if (ready_to_start) {
		unsigned int cycle = s->next_cycle;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction != AMDTP_OUT_STREAM)
				continue;

			if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
				cycle = s->next_cycle;

			if (s == d->irq_target)
				s->context->callback.sc = irq_target_callback_intermediately;
			else
				s->context->callback.sc = process_rx_packets_intermediately;
		}

		d->processing_cycle.rx_start = cycle;
	}
}

// This is executed one time. For an in-stream, the first packet has arrived. For an out-stream,
// the stream is ready to transmit its first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @queue_size: the number of packets in the queue.
 * @idle_irq_interval: the interval at which to queue packets during the initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      unsigned int queue_size, unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	struct pkt_desc *descs;
	int i, type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.
	}
	max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
		s->ctx_data.tx.event_starts = false;

		if (s->domain->replay.enable) {
			// struct fw_iso_context.drop_overflow_headers is false, therefore it's
			// possible to cache an unexpectedly large number of descriptors.
			s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
							  queue_size * 3 / 2);
			s->ctx_data.tx.cache.pos = 0;
			s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
						sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
			if (!s->ctx_data.tx.cache.descs) {
				err = -ENOMEM;
				goto err_context;
			}
		}
	} else {
		static const struct {
			unsigned int data_block;
			unsigned int syt_offset;
		} *entry, initial_state[] = {
			[CIP_SFC_32000]  = {  4, 3072 },
			[CIP_SFC_48000]  = {  6, 1024 },
			[CIP_SFC_96000]  = { 12, 1024 },
			[CIP_SFC_192000] = { 24, 1024 },
			[CIP_SFC_44100]  = {  0,   67 },
			[CIP_SFC_88200]  = {  0,   67 },
			[CIP_SFC_176400] = {  0,   67 },
		};

		s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
		s->ctx_data.rx.seq.size = queue_size;
		s->ctx_data.rx.seq.pos = 0;

		entry = &initial_state[s->sfc];
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;

		s->ctx_data.rx.event_count = 0;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	// NOTE: When operating without hardIRQ/softIRQ, applications tend to issue ioctl requests
	// for the runtime of the PCM substream at an interval equivalent to the size of the PCM
	// buffer. This can wrap around the queue of AMDTP packet descriptors, with a small loss
	// of history. To be safe, keep 8 extra elements in the queue, equivalent to 1 ms.
	descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
	if (!descs) {
		err = -ENOMEM;
		goto err_context;
	}
	s->packet_descs = descs;

	INIT_LIST_HEAD(&s->packet_descs_list);
	for (i = 0; i < s->queue_size; ++i) {
		INIT_LIST_HEAD(&descs->link);
		list_add_tail(&descs->link, &s->packet_descs_list);
		++descs;
	}
	s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);

	s->packet_index = 0;
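	// Pre-fill the whole queue. Queueing a packet advances packet_index modulo queue_size,
	// so the loop ends once the index wraps back to zero after the queue is full.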
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This only affects the in-stream (IR context). */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->packet_descs);
	s->packet_descs = NULL;
err_context:
	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// The work item to call snd_pcm_period_elapsed() can reach here via the call of
		// snd_pcm_ops.pointer(); however, fewer packets would be available then. Therefore
		// the following call is just for user process contexts.
		if (current_work() != &s->period_work)
			fw_iso_context_flush_completions(irq_target->context);
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);

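/*
 * A minimal sketch of how a driver's snd_pcm_ops.pointer callback would typically wrap the
 * helper above; the snd_example structure and its members are hypothetical.
 *
 *	static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
 *	{
 *		struct snd_example *example = substream->private_data;
 *
 *		return amdtp_domain_stream_pcm_pointer(&example->domain, &example->stream);
 *	}
 */
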
/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for recent isochronous cycles to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target))
		fw_iso_context_flush_completions(irq_target->context);

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	cancel_work_sync(&s->period_work);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->packet_descs);
	s->packet_descs = NULL;

	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}

	mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

// Make the reference from an rx stream to a tx stream for sequence replay. When the number of
// tx streams is less than the number of rx streams, the first tx stream is selected.
static int make_association(struct amdtp_domain *d)
{
	unsigned int dst_index = 0;
	struct amdtp_stream *rx;

	// Make association to replay target.
	list_for_each_entry(rx, &d->streams, list) {
		if (rx->direction == AMDTP_OUT_STREAM) {
			unsigned int src_index = 0;
			struct amdtp_stream *tx = NULL;
			struct amdtp_stream *s;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction == AMDTP_IN_STREAM) {
					if (dst_index == src_index) {
						tx = s;
						break;
					}

					++src_index;
				}
			}
			if (!tx) {
				// Select the first entry.
				list_for_each_entry(s, &d->streams, list) {
					if (s->direction == AMDTP_IN_STREAM) {
						tx = s;
						break;
					}
				}
				// No target is available to replay the sequence.
				if (!tx)
					return -EINVAL;
			}

			rx->ctx_data.rx.replay_target = tx;

			++dst_index;
		}
	}

	return 0;
}

/**
 * amdtp_domain_start - start sending packets for isoc contexts in the domain.
 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of
 *			 IR contexts.
 * @replay_seq: whether to replay the sequence of packets in IR contexts for the sequence of
 *		packets in IT contexts.
 * @replay_on_the_fly: transfer rx packets according to the nominal frequency, then begin to
 *		       replay according to the arrival of events in tx packets.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
		       bool replay_on_the_fly)
{
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int queue_size;
	struct amdtp_stream *s;
	bool found = false;
	int err;

	if (replay_seq) {
		err = make_association(d);
		if (err < 0)
			return err;
	}
	d->replay.enable = replay_seq;
	d->replay.on_the_fly = replay_on_the_fly;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENXIO;
	d->irq_target = s;

	d->processing_cycle.tx_init_skip = tx_init_skip_cycles;

	// This is the case in which the AMDTP streams in the domain run just for a MIDI
	// substream. Use the number of events equivalent to 10 msec as the
	// interval of the hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);

	list_for_each_entry(s, &d->streams, list) {
		unsigned int idle_irq_interval = 0;

		if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
			idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
							 amdtp_rate_table[d->irq_target->sfc]);
		}

		// Starts immediately, but the DMA context actually starts several hundred cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);

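/*
 * A minimal sketch of the call sequence a driver would typically use to run streams in a
 * domain; the snd_example structure, its members, and the error handling are hypothetical and
 * simplified.
 *
 *	err = amdtp_domain_add_stream(&example->domain, &example->tx_stream, tx_channel, speed);
 *	if (err >= 0)
 *		err = amdtp_domain_add_stream(&example->domain, &example->rx_stream, rx_channel, speed);
 *	if (err >= 0)
 *		err = amdtp_domain_start(&example->domain, 0, false, false);
 *	...
 *	amdtp_domain_stop(&example->domain);
 */
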
/**
 * amdtp_domain_stop - stop sending packets for isoc contexts in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);