wa-xfer.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * WUSB Wire Adapter
  4. * Data transfer and URB enqueuing
  5. *
  6. * Copyright (C) 2005-2006 Intel Corporation
  7. * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
  8. *
  9. * How transfers work: get a buffer, break it up into segments (segment
  10. * size is a multiple of the maxpacket size). For each segment issue a
  11. * segment request (struct wa_xfer_*), then send the data buffer if
  12. * out or nothing if in (all over the DTO endpoint).
  13. *
  14. * For each submitted segment request, a notification will come over
  15. * the NEP endpoint and a transfer result (struct xfer_result) will
  16. * arrive in the DTI URB. Read it, get the xfer ID, see if there is
  17. * data coming (inbound transfer), schedule a read and handle it.
  18. *
  19. * Sounds simple; it is a pain to implement.
  20. *
  21. *
  22. * ENTRY POINTS
  23. *
  24. * FIXME
  25. *
  26. * LIFE CYCLE / STATE DIAGRAM
  27. *
  28. * FIXME
  29. *
  30. * THIS CODE IS DISGUSTING
  31. *
  32. * You have been warned; this is my second try and I am still not happy with it.
  33. *
  34. * NOTES:
  35. *
  36. * - No iso
  37. *
  38. * - Supports DMA xfers, control, bulk and maybe interrupt
  39. *
  40. * - Does not recycle unused rpipes
  41. *
  42. * An rpipe is assigned to an endpoint the first time it is used,
  43. * and then it stays assigned until the endpoint is disabled
  44. * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
  45. * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
  46. * (should be a mutex).
  47. *
  48. * There are two ways it could be done:
  49. *
  50. * (a) set up a timer every time an rpipe's use count drops to 1
  51. * (which means unused) or when a transfer ends. Reset the
  52. * timer when a xfer is queued. If the timer expires, release
  53. * the rpipe [see rpipe_ep_disable()].
  54. *
  55. * (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
  56. * and none are found, go over the list, check each rpipe's
  57. * endpoint and activity record, and if it has had no
  58. * last-xfer-done-ts in the last x seconds, take it
  59. *
  60. * However, because we have a set of limited
  61. * resources (max-segments-at-the-same-time per xfer,
  62. * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
  63. * we are going to have to rebuild all this around a scheduler,
  64. * where we keep a list of transactions to do and, based on the
  65. * availability of the different required components (blocks,
  66. * rpipes, segment slots, etc), schedule them. Painful.
  67. */
  68. #include <linux/spinlock.h>
  69. #include <linux/slab.h>
  70. #include <linux/hash.h>
  71. #include <linux/ratelimit.h>
  72. #include <linux/export.h>
  73. #include <linux/scatterlist.h>
  74. #include "wa-hc.h"
  75. #include "wusbhc.h"
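/*
 * Illustrative sketch of the per-URB flow described in the header comment
 * above. This block is not part of the original driver and is kept under
 * #if 0; wa_send_seg_request() and wa_send_seg_data() are hypothetical
 * stand-ins for the real per-segment submission paths further down in this
 * file.
 */
#if 0
static void wa_urb_flow_sketch(struct urb *urb, size_t seg_size)
{
	size_t done = 0, chunk;

	while (done < urb->transfer_buffer_length) {
		chunk = min_t(size_t, seg_size,
			      urb->transfer_buffer_length - done);
		/* 1. send the wa_xfer_* request for this segment over DTO */
		wa_send_seg_request(urb, done, chunk);	/* hypothetical */
		/* 2. for OUT transfers, follow it with the data chunk */
		if (!usb_pipein(urb->pipe))
			wa_send_seg_data(urb, done, chunk); /* hypothetical */
		/*
		 * 3. the transfer result for this segment arrives later on
		 * the DTI endpoint; for IN transfers the payload read is
		 * scheduled from there.
		 */
		done += chunk;
	}
}
#endif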
  76. enum {
  77. /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
  78. WA_SEGS_MAX = 128,
  79. };
  80. enum wa_seg_status {
  81. WA_SEG_NOTREADY,
  82. WA_SEG_READY,
  83. WA_SEG_DELAYED,
  84. WA_SEG_SUBMITTED,
  85. WA_SEG_PENDING,
  86. WA_SEG_DTI_PENDING,
  87. WA_SEG_DONE,
  88. WA_SEG_ERROR,
  89. WA_SEG_ABORTED,
  90. };
  91. static void wa_xfer_delayed_run(struct wa_rpipe *);
  92. static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
  93. /*
  94. * Life cycle governed by 'struct urb' (the refcount of the struct is
  95. * that of the 'struct urb' and usb_free_urb() would free the whole
  96. * struct).
  97. */
  98. struct wa_seg {
  99. struct urb tr_urb; /* transfer request urb. */
  100. struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
  101. struct urb *dto_urb; /* for data output. */
  102. struct list_head list_node; /* for rpipe->req_list */
  103. struct wa_xfer *xfer; /* out xfer */
  104. u8 index; /* which segment we are */
  105. int isoc_frame_count; /* number of isoc frames in this segment. */
  106. int isoc_frame_offset; /* starting frame offset in the xfer URB. */
  107. /* Isoc frame that the current transfer buffer corresponds to. */
  108. int isoc_frame_index;
  109. int isoc_size; /* size of all isoc frames sent by this seg. */
  110. enum wa_seg_status status;
  111. ssize_t result; /* bytes xfered or error */
  112. struct wa_xfer_hdr xfer_hdr;
  113. };
  114. static inline void wa_seg_init(struct wa_seg *seg)
  115. {
  116. usb_init_urb(&seg->tr_urb);
  117. /* set the remaining memory to 0. */
  118. memset(((void *)seg) + sizeof(seg->tr_urb), 0,
  119. sizeof(*seg) - sizeof(seg->tr_urb));
  120. }
  121. /*
  122. * Protected by xfer->lock
  123. *
  124. */
  125. struct wa_xfer {
  126. struct kref refcnt;
  127. struct list_head list_node;
  128. spinlock_t lock;
  129. u32 id;
  130. struct wahc *wa; /* Wire adapter we are plugged to */
  131. struct usb_host_endpoint *ep;
  132. struct urb *urb; /* URB we are transferring for */
  133. struct wa_seg **seg; /* transfer segments */
  134. u8 segs, segs_submitted, segs_done;
  135. unsigned is_inbound:1;
  136. unsigned is_dma:1;
  137. size_t seg_size;
  138. int result;
  139. gfp_t gfp; /* allocation mask */
  140. struct wusb_dev *wusb_dev; /* for activity timestamps */
  141. };
  142. static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
  143. struct wa_seg *seg, int curr_iso_frame);
  144. static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
  145. int starting_index, enum wa_seg_status status);
  146. static inline void wa_xfer_init(struct wa_xfer *xfer)
  147. {
  148. kref_init(&xfer->refcnt);
  149. INIT_LIST_HEAD(&xfer->list_node);
  150. spin_lock_init(&xfer->lock);
  151. }
  152. /*
  153. * Destroy a transfer structure
  154. *
  155. * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
  156. * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
  157. */
  158. static void wa_xfer_destroy(struct kref *_xfer)
  159. {
  160. struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
  161. if (xfer->seg) {
  162. unsigned cnt;
  163. for (cnt = 0; cnt < xfer->segs; cnt++) {
  164. struct wa_seg *seg = xfer->seg[cnt];
  165. if (seg) {
  166. usb_free_urb(seg->isoc_pack_desc_urb);
  167. if (seg->dto_urb) {
  168. kfree(seg->dto_urb->sg);
  169. usb_free_urb(seg->dto_urb);
  170. }
  171. usb_free_urb(&seg->tr_urb);
  172. }
  173. }
  174. kfree(xfer->seg);
  175. }
  176. kfree(xfer);
  177. }
  178. static void wa_xfer_get(struct wa_xfer *xfer)
  179. {
  180. kref_get(&xfer->refcnt);
  181. }
  182. static void wa_xfer_put(struct wa_xfer *xfer)
  183. {
  184. kref_put(&xfer->refcnt, wa_xfer_destroy);
  185. }
  186. /*
  187. * Try to get exclusive access to the DTO endpoint resource. Return true
  188. * if successful.
  189. */
  190. static inline int __wa_dto_try_get(struct wahc *wa)
  191. {
  192. return (test_and_set_bit(0, &wa->dto_in_use) == 0);
  193. }
  194. /* Release the DTO endpoint resource. */
  195. static inline void __wa_dto_put(struct wahc *wa)
  196. {
  197. clear_bit_unlock(0, &wa->dto_in_use);
  198. }
  199. /* Service RPIPEs that are waiting on the DTO resource. */
  200. static void wa_check_for_delayed_rpipes(struct wahc *wa)
  201. {
  202. unsigned long flags;
  203. int dto_waiting = 0;
  204. struct wa_rpipe *rpipe;
  205. spin_lock_irqsave(&wa->rpipe_lock, flags);
  206. while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
  207. rpipe = list_first_entry(&wa->rpipe_delayed_list,
  208. struct wa_rpipe, list_node);
  209. __wa_xfer_delayed_run(rpipe, &dto_waiting);
  210. /* remove this RPIPE from the list if it is not waiting. */
  211. if (!dto_waiting) {
  212. pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
  213. __func__,
  214. le16_to_cpu(rpipe->descr.wRPipeIndex));
  215. list_del_init(&rpipe->list_node);
  216. }
  217. }
  218. spin_unlock_irqrestore(&wa->rpipe_lock, flags);
  219. }
  220. /* add this RPIPE to the end of the delayed RPIPE list. */
  221. static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
  222. {
  223. unsigned long flags;
  224. spin_lock_irqsave(&wa->rpipe_lock, flags);
  225. /* add rpipe to the list if it is not already on it. */
  226. if (list_empty(&rpipe->list_node)) {
  227. pr_debug("%s: adding RPIPE %d to the delayed list.\n",
  228. __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
  229. list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
  230. }
  231. spin_unlock_irqrestore(&wa->rpipe_lock, flags);
  232. }
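/*
 * Illustrative sketch (not part of the original driver) of how the DTO
 * accounting helpers above are meant to be used around a data-out
 * submission; wa_submit_one_seg() is a hypothetical stand-in for the real
 * path (__wa_xfer_delayed_run() -> __wa_seg_submit()).
 */
#if 0
static void wa_dto_usage_sketch(struct wahc *wa, struct wa_rpipe *rpipe)
{
	int dto_done;

	if (!__wa_dto_try_get(wa)) {
		/* DTO busy: park the RPIPE and retry when it is released. */
		wa_add_delayed_rpipe(wa, rpipe);
		return;
	}
	dto_done = wa_submit_one_seg(rpipe);	/* hypothetical */
	if (dto_done) {
		/* done with DTO: release it and service queued RPIPEs. */
		__wa_dto_put(wa);
		wa_check_for_delayed_rpipes(wa);
	}
}
#endif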
  233. /*
  234. * xfer is referenced
  235. *
  236. * xfer->lock has to be unlocked
  237. *
  238. * We take xfer->lock for setting the result; this is a barrier
  239. * against drivers/usb/core/hcd.c:unlink1() being called after we call
  240. * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
  241. * reference to the transfer.
  242. */
  243. static void wa_xfer_giveback(struct wa_xfer *xfer)
  244. {
  245. unsigned long flags;
  246. spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
  247. list_del_init(&xfer->list_node);
  248. usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
  249. spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
  250. /* FIXME: segmentation broken -- kills DWA */
  251. wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
  252. wa_put(xfer->wa);
  253. wa_xfer_put(xfer);
  254. }
  255. /*
  256. * xfer is referenced
  257. *
  258. * xfer->lock has to be unlocked
  259. */
  260. static void wa_xfer_completion(struct wa_xfer *xfer)
  261. {
  262. if (xfer->wusb_dev)
  263. wusb_dev_put(xfer->wusb_dev);
  264. rpipe_put(xfer->ep->hcpriv);
  265. wa_xfer_giveback(xfer);
  266. }
  267. /*
  268. * Initialize a transfer's ID
  269. *
  270. * We need to use a sequential number; if we use the pointer or the
  271. * hash of the pointer, it can repeat over sequential transfers and
  272. * then it will confuse the HWA... which makes one wonder why they
  273. * put a 32 bit handle in there in the first place.
  274. */
  275. static void wa_xfer_id_init(struct wa_xfer *xfer)
  276. {
  277. xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
  278. }
  279. /* Return the xfer's ID. */
  280. static inline u32 wa_xfer_id(struct wa_xfer *xfer)
  281. {
  282. return xfer->id;
  283. }
  284. /* Return the xfer's ID in transport format (little endian). */
  285. static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
  286. {
  287. return cpu_to_le32(xfer->id);
  288. }
  289. /*
  290. * If transfer is done, wrap it up and return true
  291. *
  292. * xfer->lock has to be locked
  293. */
  294. static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
  295. {
  296. struct device *dev = &xfer->wa->usb_iface->dev;
  297. unsigned result, cnt;
  298. struct wa_seg *seg;
  299. struct urb *urb = xfer->urb;
  300. unsigned found_short = 0;
  301. result = xfer->segs_done == xfer->segs_submitted;
  302. if (result == 0)
  303. goto out;
  304. urb->actual_length = 0;
  305. for (cnt = 0; cnt < xfer->segs; cnt++) {
  306. seg = xfer->seg[cnt];
  307. switch (seg->status) {
  308. case WA_SEG_DONE:
  309. if (found_short && seg->result > 0) {
  310. dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
  311. xfer, wa_xfer_id(xfer), cnt,
  312. seg->result);
  313. urb->status = -EINVAL;
  314. goto out;
  315. }
  316. urb->actual_length += seg->result;
  317. if (!(usb_pipeisoc(xfer->urb->pipe))
  318. && seg->result < xfer->seg_size
  319. && cnt != xfer->segs-1)
  320. found_short = 1;
  321. dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
  322. "result %zu urb->actual_length %d\n",
  323. xfer, wa_xfer_id(xfer), seg->index, found_short,
  324. seg->result, urb->actual_length);
  325. break;
  326. case WA_SEG_ERROR:
  327. xfer->result = seg->result;
  328. dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
  329. xfer, wa_xfer_id(xfer), seg->index, seg->result,
  330. seg->result);
  331. goto out;
  332. case WA_SEG_ABORTED:
  333. xfer->result = seg->result;
  334. dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
  335. xfer, wa_xfer_id(xfer), seg->index, seg->result,
  336. seg->result);
  337. goto out;
  338. default:
  339. dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
  340. xfer, wa_xfer_id(xfer), cnt, seg->status);
  341. xfer->result = -EINVAL;
  342. goto out;
  343. }
  344. }
  345. xfer->result = 0;
  346. out:
  347. return result;
  348. }
  349. /*
  350. * Mark the given segment as done. Return true if this completes the xfer.
  351. * This should only be called for segs that have been submitted to an RPIPE.
  352. * Delayed segs are not marked as submitted so they do not need to be marked
  353. * as done when cleaning up.
  354. *
  355. * xfer->lock has to be locked
  356. */
  357. static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
  358. struct wa_seg *seg, enum wa_seg_status status)
  359. {
  360. seg->status = status;
  361. xfer->segs_done++;
  362. /* check for done. */
  363. return __wa_xfer_is_done(xfer);
  364. }
  365. /*
  366. * Search the WA's transfer list for a transfer with the given ID
  367. *
  368. * The ID is the sequential number assigned by wa_xfer_id_init(); on a
  369. * match, the transfer's reference count is bumped before it is returned.
  370. *
  371. * @returns NULL if not found.
  372. */
  373. static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
  374. {
  375. unsigned long flags;
  376. struct wa_xfer *xfer_itr;
  377. spin_lock_irqsave(&wa->xfer_list_lock, flags);
  378. list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
  379. if (id == xfer_itr->id) {
  380. wa_xfer_get(xfer_itr);
  381. goto out;
  382. }
  383. }
  384. xfer_itr = NULL;
  385. out:
  386. spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
  387. return xfer_itr;
  388. }
  389. struct wa_xfer_abort_buffer {
  390. struct urb urb;
  391. struct wahc *wa;
  392. struct wa_xfer_abort cmd;
  393. };
  394. static void __wa_xfer_abort_cb(struct urb *urb)
  395. {
  396. struct wa_xfer_abort_buffer *b = urb->context;
  397. struct wahc *wa = b->wa;
  398. /*
  399. * If the abort request URB failed, then the HWA did not get the abort
  400. * command. Forcibly clean up the xfer without waiting for a Transfer
  401. * Result from the HWA.
  402. */
  403. if (urb->status < 0) {
  404. struct wa_xfer *xfer;
  405. struct device *dev = &wa->usb_iface->dev;
  406. xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
  407. dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
  408. __func__, urb->status);
  409. if (xfer) {
  410. unsigned long flags;
  411. int done, seg_index = 0;
  412. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  413. dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
  414. __func__, xfer, wa_xfer_id(xfer));
  415. spin_lock_irqsave(&xfer->lock, flags);
  416. /* skip done segs. */
  417. while (seg_index < xfer->segs) {
  418. struct wa_seg *seg = xfer->seg[seg_index];
  419. if ((seg->status == WA_SEG_DONE) ||
  420. (seg->status == WA_SEG_ERROR)) {
  421. ++seg_index;
  422. } else {
  423. break;
  424. }
  425. }
  426. /* mark remaining segs as aborted. */
  427. wa_complete_remaining_xfer_segs(xfer, seg_index,
  428. WA_SEG_ABORTED);
  429. done = __wa_xfer_is_done(xfer);
  430. spin_unlock_irqrestore(&xfer->lock, flags);
  431. if (done)
  432. wa_xfer_completion(xfer);
  433. wa_xfer_delayed_run(rpipe);
  434. wa_xfer_put(xfer);
  435. } else {
  436. dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
  437. __func__, le32_to_cpu(b->cmd.dwTransferID));
  438. }
  439. }
  440. wa_put(wa); /* taken in __wa_xfer_abort */
  441. usb_put_urb(&b->urb);
  442. }
  443. /*
  444. * Aborts an ongoing transaction
  445. *
  446. * Assumes the transfer is referenced and locked and in a submitted
  447. * state (mainly that there is an endpoint/rpipe assigned).
  448. *
  449. * The callback (see above) mostly just frees the request data by
  450. * putting the URB; because the URB is allocated at the head of the
  451. * struct, the whole space we allocated is kfreed.
  452. */
  453. static int __wa_xfer_abort(struct wa_xfer *xfer)
  454. {
  455. int result = -ENOMEM;
  456. struct device *dev = &xfer->wa->usb_iface->dev;
  457. struct wa_xfer_abort_buffer *b;
  458. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  459. b = kmalloc(sizeof(*b), GFP_ATOMIC);
  460. if (b == NULL)
  461. goto error_kmalloc;
  462. b->cmd.bLength = sizeof(b->cmd);
  463. b->cmd.bRequestType = WA_XFER_ABORT;
  464. b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
  465. b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
  466. b->wa = wa_get(xfer->wa);
  467. usb_init_urb(&b->urb);
  468. usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
  469. usb_sndbulkpipe(xfer->wa->usb_dev,
  470. xfer->wa->dto_epd->bEndpointAddress),
  471. &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
  472. result = usb_submit_urb(&b->urb, GFP_ATOMIC);
  473. if (result < 0)
  474. goto error_submit;
  475. return result; /* callback frees! */
  476. error_submit:
  477. wa_put(xfer->wa);
  478. if (printk_ratelimit())
  479. dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
  480. xfer, result);
  481. kfree(b);
  482. error_kmalloc:
  483. return result;
  484. }
  485. /*
  486. * Calculate the number of isoc frames starting from isoc_frame_offset
  487. * that will fit in a transfer segment.
  488. */
  489. static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
  490. int isoc_frame_offset, int *total_size)
  491. {
  492. int segment_size = 0, frame_count = 0;
  493. int index = isoc_frame_offset;
  494. struct usb_iso_packet_descriptor *iso_frame_desc =
  495. xfer->urb->iso_frame_desc;
  496. while ((index < xfer->urb->number_of_packets)
  497. && ((segment_size + iso_frame_desc[index].length)
  498. <= xfer->seg_size)) {
  499. /*
  500. * For Alereon HWA devices, only include an isoc frame in an
  501. * out segment if it is physically contiguous with the previous
  502. * frame. This is required because those devices expect
  503. * the isoc frames to be sent as a single USB transaction as
  504. * opposed to one transaction per frame with standard HWA.
  505. */
  506. if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
  507. && (xfer->is_inbound == 0)
  508. && (index > isoc_frame_offset)
  509. && ((iso_frame_desc[index - 1].offset +
  510. iso_frame_desc[index - 1].length) !=
  511. iso_frame_desc[index].offset))
  512. break;
  513. /* this frame fits. count it. */
  514. ++frame_count;
  515. segment_size += iso_frame_desc[index].length;
  516. /* move to the next isoc frame. */
  517. ++index;
  518. }
  519. *total_size = segment_size;
  520. return frame_count;
  521. }
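/*
 * Worked example (hypothetical numbers, for illustration only): with
 * xfer->seg_size = 3072 and four consecutive 1024-byte iso_frame_desc
 * entries starting at isoc_frame_offset, the loop above accepts frames
 * until the next one would exceed seg_size, so it returns 3 with
 * *total_size = 3072; a second call starting at isoc_frame_offset + 3
 * picks up the remaining frame.
 */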
  522. /*
  523. * Compute the transfer request header size, segment size and number of segments.
  524. * @returns < 0 on error, transfer segment request size if ok
  525. */
  526. static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
  527. enum wa_xfer_type *pxfer_type)
  528. {
  529. ssize_t result;
  530. struct device *dev = &xfer->wa->usb_iface->dev;
  531. size_t maxpktsize;
  532. struct urb *urb = xfer->urb;
  533. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  534. switch (rpipe->descr.bmAttribute & 0x3) {
  535. case USB_ENDPOINT_XFER_CONTROL:
  536. *pxfer_type = WA_XFER_TYPE_CTL;
  537. result = sizeof(struct wa_xfer_ctl);
  538. break;
  539. case USB_ENDPOINT_XFER_INT:
  540. case USB_ENDPOINT_XFER_BULK:
  541. *pxfer_type = WA_XFER_TYPE_BI;
  542. result = sizeof(struct wa_xfer_bi);
  543. break;
  544. case USB_ENDPOINT_XFER_ISOC:
  545. *pxfer_type = WA_XFER_TYPE_ISO;
  546. result = sizeof(struct wa_xfer_hwaiso);
  547. break;
  548. default:
  549. /* never happens */
  550. BUG();
  551. result = -EINVAL; /* shut gcc up */
  552. }
  553. xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
  554. xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
  555. maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
  556. xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
  557. * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
  558. /* Compute the segment size and make sure it is a multiple of
  559. * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
  560. * a check (FIXME) */
  561. if (xfer->seg_size < maxpktsize) {
  562. dev_err(dev,
  563. "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
  564. xfer->seg_size, maxpktsize);
  565. result = -EINVAL;
  566. goto error;
  567. }
  568. xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
  569. if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
  570. int index = 0;
  571. xfer->segs = 0;
  572. /*
  573. * loop over urb->number_of_packets to determine how many
  574. * xfer segments will be needed to send the isoc frames.
  575. */
  576. while (index < urb->number_of_packets) {
  577. int seg_size; /* don't care. */
  578. index += __wa_seg_calculate_isoc_frame_count(xfer,
  579. index, &seg_size);
  580. ++xfer->segs;
  581. }
  582. } else {
  583. xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
  584. xfer->seg_size);
  585. if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
  586. xfer->segs = 1;
  587. }
  588. if (xfer->segs > WA_SEGS_MAX) {
  589. dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
  590. (urb->transfer_buffer_length/xfer->seg_size),
  591. WA_SEGS_MAX);
  592. result = -EINVAL;
  593. goto error;
  594. }
  595. error:
  596. return result;
  597. }
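/*
 * Worked example (hypothetical descriptor values, for illustration only):
 * with wBlocks = 16 and bRPipeBlockSize = 8, the block size is
 * 1 << (8 - 1) = 128 bytes, so seg_size = 16 * 128 = 2048 bytes. With
 * wMaxPacketSize = 512 that is already a multiple of the packet size, and
 * a 10000-byte bulk URB is split into DIV_ROUND_UP(10000, 2048) = 5
 * segments by the code above.
 */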
  598. static void __wa_setup_isoc_packet_descr(
  599. struct wa_xfer_packet_info_hwaiso *packet_desc,
  600. struct wa_xfer *xfer,
  601. struct wa_seg *seg) {
  602. struct usb_iso_packet_descriptor *iso_frame_desc =
  603. xfer->urb->iso_frame_desc;
  604. int frame_index;
  605. /* populate isoc packet descriptor. */
  606. packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
  607. packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
  608. (sizeof(packet_desc->PacketLength[0]) *
  609. seg->isoc_frame_count));
  610. for (frame_index = 0; frame_index < seg->isoc_frame_count;
  611. ++frame_index) {
  612. int offset_index = frame_index + seg->isoc_frame_offset;
  613. packet_desc->PacketLength[frame_index] =
  614. cpu_to_le16(iso_frame_desc[offset_index].length);
  615. }
  616. }
  617. /* Fill in the common request header and xfer-type specific data. */
  618. static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
  619. struct wa_xfer_hdr *xfer_hdr0,
  620. enum wa_xfer_type xfer_type,
  621. size_t xfer_hdr_size)
  622. {
  623. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  624. struct wa_seg *seg = xfer->seg[0];
  625. xfer_hdr0 = &seg->xfer_hdr;
  626. xfer_hdr0->bLength = xfer_hdr_size;
  627. xfer_hdr0->bRequestType = xfer_type;
  628. xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
  629. xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
  630. xfer_hdr0->bTransferSegment = 0;
  631. switch (xfer_type) {
  632. case WA_XFER_TYPE_CTL: {
  633. struct wa_xfer_ctl *xfer_ctl =
  634. container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
  635. xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
  636. memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
  637. sizeof(xfer_ctl->baSetupData));
  638. break;
  639. }
  640. case WA_XFER_TYPE_BI:
  641. break;
  642. case WA_XFER_TYPE_ISO: {
  643. struct wa_xfer_hwaiso *xfer_iso =
  644. container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
  645. struct wa_xfer_packet_info_hwaiso *packet_desc =
  646. ((void *)xfer_iso) + xfer_hdr_size;
  647. /* populate the isoc section of the transfer request. */
  648. xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
  649. /* populate isoc packet descriptor. */
  650. __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
  651. break;
  652. }
  653. default:
  654. BUG();
  655. };
  656. }
  657. /*
  658. * Callback for the OUT data phase of the segment request
  659. *
  660. * Check wa_seg_tr_cb(); most comments also apply here because this
  661. * function does almost the same thing and they work closely
  662. * together.
  663. *
  664. * If the seg request has failed but this DTO phase has succeeded,
  665. * wa_seg_tr_cb() has already failed the segment and moved the
  666. * status to WA_SEG_ERROR, so this will go through 'case 0' and
  667. * effectively do nothing.
  668. */
  669. static void wa_seg_dto_cb(struct urb *urb)
  670. {
  671. struct wa_seg *seg = urb->context;
  672. struct wa_xfer *xfer = seg->xfer;
  673. struct wahc *wa;
  674. struct device *dev;
  675. struct wa_rpipe *rpipe;
  676. unsigned long flags;
  677. unsigned rpipe_ready = 0;
  678. int data_send_done = 1, release_dto = 0, holding_dto = 0;
  679. u8 done = 0;
  680. int result;
  681. /* free the sg if it was used. */
  682. kfree(urb->sg);
  683. urb->sg = NULL;
  684. spin_lock_irqsave(&xfer->lock, flags);
  685. wa = xfer->wa;
  686. dev = &wa->usb_iface->dev;
  687. if (usb_pipeisoc(xfer->urb->pipe)) {
  688. /* Alereon HWA sends all isoc frames in a single transfer. */
  689. if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
  690. seg->isoc_frame_index += seg->isoc_frame_count;
  691. else
  692. seg->isoc_frame_index += 1;
  693. if (seg->isoc_frame_index < seg->isoc_frame_count) {
  694. data_send_done = 0;
  695. holding_dto = 1; /* checked in error cases. */
  696. /*
  697. * if this is the last isoc frame of the segment, we
  698. * can release DTO after sending this frame.
  699. */
  700. if ((seg->isoc_frame_index + 1) >=
  701. seg->isoc_frame_count)
  702. release_dto = 1;
  703. }
  704. dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
  705. wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
  706. holding_dto, release_dto);
  707. }
  708. spin_unlock_irqrestore(&xfer->lock, flags);
  709. switch (urb->status) {
  710. case 0:
  711. spin_lock_irqsave(&xfer->lock, flags);
  712. seg->result += urb->actual_length;
  713. if (data_send_done) {
  714. dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
  715. wa_xfer_id(xfer), seg->index, seg->result);
  716. if (seg->status < WA_SEG_PENDING)
  717. seg->status = WA_SEG_PENDING;
  718. } else {
  719. /* should only hit this for isoc xfers. */
  720. /*
  721. * Populate the dto URB with the next isoc frame buffer,
  722. * send the URB and release DTO if we no longer need it.
  723. */
  724. __wa_populate_dto_urb_isoc(xfer, seg,
  725. seg->isoc_frame_offset + seg->isoc_frame_index);
  726. /* resubmit the URB with the next isoc frame. */
  727. /* take a ref on resubmit. */
  728. wa_xfer_get(xfer);
  729. result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
  730. if (result < 0) {
  731. dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
  732. wa_xfer_id(xfer), seg->index, result);
  733. spin_unlock_irqrestore(&xfer->lock, flags);
  734. goto error_dto_submit;
  735. }
  736. }
  737. spin_unlock_irqrestore(&xfer->lock, flags);
  738. if (release_dto) {
  739. __wa_dto_put(wa);
  740. wa_check_for_delayed_rpipes(wa);
  741. }
  742. break;
  743. case -ECONNRESET: /* URB unlinked; no need to do anything */
  744. case -ENOENT: /* as it was done by whoever unlinked us */
  745. if (holding_dto) {
  746. __wa_dto_put(wa);
  747. wa_check_for_delayed_rpipes(wa);
  748. }
  749. break;
  750. default: /* Other errors ... */
  751. dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
  752. wa_xfer_id(xfer), seg->index, urb->status);
  753. goto error_default;
  754. }
  755. /* taken when this URB was submitted. */
  756. wa_xfer_put(xfer);
  757. return;
  758. error_dto_submit:
  759. /* taken on resubmit attempt. */
  760. wa_xfer_put(xfer);
  761. error_default:
  762. spin_lock_irqsave(&xfer->lock, flags);
  763. rpipe = xfer->ep->hcpriv;
  764. if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  765. EDC_ERROR_TIMEFRAME)){
  766. dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
  767. wa_reset_all(wa);
  768. }
  769. if (seg->status != WA_SEG_ERROR) {
  770. seg->result = urb->status;
  771. __wa_xfer_abort(xfer);
  772. rpipe_ready = rpipe_avail_inc(rpipe);
  773. done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
  774. }
  775. spin_unlock_irqrestore(&xfer->lock, flags);
  776. if (holding_dto) {
  777. __wa_dto_put(wa);
  778. wa_check_for_delayed_rpipes(wa);
  779. }
  780. if (done)
  781. wa_xfer_completion(xfer);
  782. if (rpipe_ready)
  783. wa_xfer_delayed_run(rpipe);
  784. /* taken when this URB was submitted. */
  785. wa_xfer_put(xfer);
  786. }
  787. /*
  788. * Callback for the isoc packet descriptor phase of the segment request
  789. *
  790. * Check wa_seg_tr_cb(); most comments also apply here because this
  791. * function does almost the same thing and they work closely
  792. * together.
  793. *
  794. * If the seg request has failed but this phase has succeeded,
  795. * wa_seg_tr_cb() has already failed the segment and moved the
  796. * status to WA_SEG_ERROR, so this will go through 'case 0' and
  797. * effectively do nothing.
  798. */
  799. static void wa_seg_iso_pack_desc_cb(struct urb *urb)
  800. {
  801. struct wa_seg *seg = urb->context;
  802. struct wa_xfer *xfer = seg->xfer;
  803. struct wahc *wa;
  804. struct device *dev;
  805. struct wa_rpipe *rpipe;
  806. unsigned long flags;
  807. unsigned rpipe_ready = 0;
  808. u8 done = 0;
  809. switch (urb->status) {
  810. case 0:
  811. spin_lock_irqsave(&xfer->lock, flags);
  812. wa = xfer->wa;
  813. dev = &wa->usb_iface->dev;
  814. dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
  815. wa_xfer_id(xfer), seg->index);
  816. if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
  817. seg->status = WA_SEG_PENDING;
  818. spin_unlock_irqrestore(&xfer->lock, flags);
  819. break;
  820. case -ECONNRESET: /* URB unlinked; no need to do anything */
  821. case -ENOENT: /* as it was done by whoever unlinked us */
  822. break;
  823. default: /* Other errors ... */
  824. spin_lock_irqsave(&xfer->lock, flags);
  825. wa = xfer->wa;
  826. dev = &wa->usb_iface->dev;
  827. rpipe = xfer->ep->hcpriv;
  828. pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
  829. wa_xfer_id(xfer), seg->index, urb->status);
  830. if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  831. EDC_ERROR_TIMEFRAME)){
  832. dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
  833. wa_reset_all(wa);
  834. }
  835. if (seg->status != WA_SEG_ERROR) {
  836. usb_unlink_urb(seg->dto_urb);
  837. seg->result = urb->status;
  838. __wa_xfer_abort(xfer);
  839. rpipe_ready = rpipe_avail_inc(rpipe);
  840. done = __wa_xfer_mark_seg_as_done(xfer, seg,
  841. WA_SEG_ERROR);
  842. }
  843. spin_unlock_irqrestore(&xfer->lock, flags);
  844. if (done)
  845. wa_xfer_completion(xfer);
  846. if (rpipe_ready)
  847. wa_xfer_delayed_run(rpipe);
  848. }
  849. /* taken when this URB was submitted. */
  850. wa_xfer_put(xfer);
  851. }
  852. /*
  853. * Callback for the segment request
  854. *
  855. * If successful, transition the segment state (unless it was already
  856. * transitioned or this is an outbound transfer); otherwise, take note
  857. * of the error, mark this segment done and try completion.
  858. *
  859. * Note we don't access xfer until we are sure that the transfer hasn't
  860. * been cancelled (ECONNRESET, ENOENT), in which case seg->xfer may
  861. * already be gone.
  862. *
  863. * We have to check before setting the status to WA_SEG_PENDING
  864. * because sometimes the xfer result callback arrives before this
  865. * callback (geeeeeeze), so it might happen that we are already in
  866. * another state. As well, we don't set it if the transfer is not inbound,
  867. * as in that case, wa_seg_dto_cb will do it when the OUT data phase
  868. * finishes.
  869. */
  870. static void wa_seg_tr_cb(struct urb *urb)
  871. {
  872. struct wa_seg *seg = urb->context;
  873. struct wa_xfer *xfer = seg->xfer;
  874. struct wahc *wa;
  875. struct device *dev;
  876. struct wa_rpipe *rpipe;
  877. unsigned long flags;
  878. unsigned rpipe_ready;
  879. u8 done = 0;
  880. switch (urb->status) {
  881. case 0:
  882. spin_lock_irqsave(&xfer->lock, flags);
  883. wa = xfer->wa;
  884. dev = &wa->usb_iface->dev;
  885. dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
  886. xfer, wa_xfer_id(xfer), seg->index);
  887. if (xfer->is_inbound &&
  888. seg->status < WA_SEG_PENDING &&
  889. !(usb_pipeisoc(xfer->urb->pipe)))
  890. seg->status = WA_SEG_PENDING;
  891. spin_unlock_irqrestore(&xfer->lock, flags);
  892. break;
  893. case -ECONNRESET: /* URB unlinked; no need to do anything */
  894. case -ENOENT: /* as it was done by whoever unlinked us */
  895. break;
  896. default: /* Other errors ... */
  897. spin_lock_irqsave(&xfer->lock, flags);
  898. wa = xfer->wa;
  899. dev = &wa->usb_iface->dev;
  900. rpipe = xfer->ep->hcpriv;
  901. if (printk_ratelimit())
  902. dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
  903. xfer, wa_xfer_id(xfer), seg->index,
  904. urb->status);
  905. if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  906. EDC_ERROR_TIMEFRAME)){
  907. dev_err(dev, "DTO: URB max acceptable errors "
  908. "exceeded, resetting device\n");
  909. wa_reset_all(wa);
  910. }
  911. usb_unlink_urb(seg->isoc_pack_desc_urb);
  912. usb_unlink_urb(seg->dto_urb);
  913. seg->result = urb->status;
  914. __wa_xfer_abort(xfer);
  915. rpipe_ready = rpipe_avail_inc(rpipe);
  916. done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
  917. spin_unlock_irqrestore(&xfer->lock, flags);
  918. if (done)
  919. wa_xfer_completion(xfer);
  920. if (rpipe_ready)
  921. wa_xfer_delayed_run(rpipe);
  922. }
  923. /* taken when this URB was submitted. */
  924. wa_xfer_put(xfer);
  925. }
  926. /*
  927. * Allocate an SG list to store bytes_to_transfer bytes and copy the
  928. * subset of the in_sg that matches the buffer subset
  929. * we are about to transfer.
  930. */
  931. static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
  932. const unsigned int bytes_transferred,
  933. const unsigned int bytes_to_transfer, int *out_num_sgs)
  934. {
  935. struct scatterlist *out_sg;
  936. unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
  937. nents;
  938. struct scatterlist *current_xfer_sg = in_sg;
  939. struct scatterlist *current_seg_sg, *last_seg_sg;
  940. /* skip previously transferred pages. */
  941. while ((current_xfer_sg) &&
  942. (bytes_processed < bytes_transferred)) {
  943. bytes_processed += current_xfer_sg->length;
  944. /* advance the sg if current segment starts on or past the
  945. next page. */
  946. if (bytes_processed <= bytes_transferred)
  947. current_xfer_sg = sg_next(current_xfer_sg);
  948. }
  949. /* the data for the current segment starts in current_xfer_sg.
  950. calculate the offset. */
  951. if (bytes_processed > bytes_transferred) {
  952. offset_into_current_page_data = current_xfer_sg->length -
  953. (bytes_processed - bytes_transferred);
  954. }
  955. /* calculate the number of pages needed by this segment. */
  956. nents = DIV_ROUND_UP((bytes_to_transfer +
  957. offset_into_current_page_data +
  958. current_xfer_sg->offset),
  959. PAGE_SIZE);
  960. out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
  961. if (out_sg) {
  962. sg_init_table(out_sg, nents);
  963. /* copy the portion of the incoming SG that correlates to the
  964. * data to be transferred by this segment to the segment SG. */
  965. last_seg_sg = current_seg_sg = out_sg;
  966. bytes_processed = 0;
  967. /* reset nents and calculate the actual number of sg entries
  968. needed. */
  969. nents = 0;
  970. while ((bytes_processed < bytes_to_transfer) &&
  971. current_seg_sg && current_xfer_sg) {
  972. unsigned int page_len = min((current_xfer_sg->length -
  973. offset_into_current_page_data),
  974. (bytes_to_transfer - bytes_processed));
  975. sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
  976. page_len,
  977. current_xfer_sg->offset +
  978. offset_into_current_page_data);
  979. bytes_processed += page_len;
  980. last_seg_sg = current_seg_sg;
  981. current_seg_sg = sg_next(current_seg_sg);
  982. current_xfer_sg = sg_next(current_xfer_sg);
  983. /* only the first page may require additional offset. */
  984. offset_into_current_page_data = 0;
  985. nents++;
  986. }
  987. /* update num_sgs and terminate the list since we may have
  988. * concatenated pages. */
  989. sg_mark_end(last_seg_sg);
  990. *out_num_sgs = nents;
  991. }
  992. return out_sg;
  993. }
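/*
 * Worked example (hypothetical layout, for illustration only): suppose
 * in_sg describes three full pages of 4096 bytes each (offset 0),
 * bytes_transferred = 6000 and bytes_to_transfer = 2048. The skip loop
 * stops on the second page with bytes_processed = 8192, so
 * offset_into_current_page_data = 4096 - (8192 - 6000) = 1904, nents is
 * computed as DIV_ROUND_UP(2048 + 1904, 4096) = 1, and the resulting
 * subset SG list has a single entry: the second page at offset 1904 with
 * length 2048.
 */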
  994. /*
  995. * Populate DMA buffer info for the isoc dto urb.
  996. */
  997. static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
  998. struct wa_seg *seg, int curr_iso_frame)
  999. {
  1000. seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
  1001. seg->dto_urb->sg = NULL;
  1002. seg->dto_urb->num_sgs = 0;
  1003. /* dto urb buffer address pulled from iso_frame_desc. */
  1004. seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
  1005. xfer->urb->iso_frame_desc[curr_iso_frame].offset;
  1006. /* The Alereon HWA sends a single URB with all isoc segs. */
  1007. if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
  1008. seg->dto_urb->transfer_buffer_length = seg->isoc_size;
  1009. else
  1010. seg->dto_urb->transfer_buffer_length =
  1011. xfer->urb->iso_frame_desc[curr_iso_frame].length;
  1012. }
  1013. /*
  1014. * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
  1015. */
  1016. static int __wa_populate_dto_urb(struct wa_xfer *xfer,
  1017. struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
  1018. {
  1019. int result = 0;
  1020. if (xfer->is_dma) {
  1021. seg->dto_urb->transfer_dma =
  1022. xfer->urb->transfer_dma + buf_itr_offset;
  1023. seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
  1024. seg->dto_urb->sg = NULL;
  1025. seg->dto_urb->num_sgs = 0;
  1026. } else {
  1027. /* do buffer or SG processing. */
  1028. seg->dto_urb->transfer_flags &=
  1029. ~URB_NO_TRANSFER_DMA_MAP;
  1030. /* this should always be 0 before a resubmit. */
  1031. seg->dto_urb->num_mapped_sgs = 0;
  1032. if (xfer->urb->transfer_buffer) {
  1033. seg->dto_urb->transfer_buffer =
  1034. xfer->urb->transfer_buffer +
  1035. buf_itr_offset;
  1036. seg->dto_urb->sg = NULL;
  1037. seg->dto_urb->num_sgs = 0;
  1038. } else {
  1039. seg->dto_urb->transfer_buffer = NULL;
  1040. /*
  1041. * allocate an SG list to store seg_size bytes
  1042. * and copy the subset of the xfer->urb->sg that
  1043. * matches the buffer subset we are about to
  1044. * read.
  1045. */
  1046. seg->dto_urb->sg = wa_xfer_create_subset_sg(
  1047. xfer->urb->sg,
  1048. buf_itr_offset, buf_itr_size,
  1049. &(seg->dto_urb->num_sgs));
  1050. if (!(seg->dto_urb->sg))
  1051. result = -ENOMEM;
  1052. }
  1053. }
  1054. seg->dto_urb->transfer_buffer_length = buf_itr_size;
  1055. return result;
  1056. }
  1057. /*
  1058. * Allocate the segs array and initialize each of them
  1059. *
  1060. * The segments are freed by wa_xfer_destroy() when the xfer use count
  1061. * drops to zero; however, because each segment is given the same life
  1062. * cycle as the USB URB it contains, it is actually freed by
  1063. * usb_put_urb() on the contained USB URB (twisted, eh?).
  1064. */
  1065. static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
  1066. {
  1067. int result, cnt, isoc_frame_offset = 0;
  1068. size_t alloc_size = sizeof(*xfer->seg[0])
  1069. - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
  1070. struct usb_device *usb_dev = xfer->wa->usb_dev;
  1071. const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
  1072. struct wa_seg *seg;
  1073. size_t buf_itr, buf_size, buf_itr_size;
  1074. result = -ENOMEM;
  1075. xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
  1076. if (xfer->seg == NULL)
  1077. goto error_segs_kzalloc;
  1078. buf_itr = 0;
  1079. buf_size = xfer->urb->transfer_buffer_length;
  1080. for (cnt = 0; cnt < xfer->segs; cnt++) {
  1081. size_t iso_pkt_descr_size = 0;
  1082. int seg_isoc_frame_count = 0, seg_isoc_size = 0;
  1083. /*
  1084. * Adjust the size of the segment object to contain space for
  1085. * the isoc packet descriptor buffer.
  1086. */
  1087. if (usb_pipeisoc(xfer->urb->pipe)) {
  1088. seg_isoc_frame_count =
  1089. __wa_seg_calculate_isoc_frame_count(xfer,
  1090. isoc_frame_offset, &seg_isoc_size);
  1091. iso_pkt_descr_size =
  1092. sizeof(struct wa_xfer_packet_info_hwaiso) +
  1093. (seg_isoc_frame_count * sizeof(__le16));
  1094. }
  1095. result = -ENOMEM;
  1096. seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
  1097. GFP_ATOMIC);
  1098. if (seg == NULL)
  1099. goto error_seg_kmalloc;
  1100. wa_seg_init(seg);
  1101. seg->xfer = xfer;
  1102. seg->index = cnt;
  1103. usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
  1104. usb_sndbulkpipe(usb_dev,
  1105. dto_epd->bEndpointAddress),
  1106. &seg->xfer_hdr, xfer_hdr_size,
  1107. wa_seg_tr_cb, seg);
  1108. buf_itr_size = min(buf_size, xfer->seg_size);
  1109. if (usb_pipeisoc(xfer->urb->pipe)) {
  1110. seg->isoc_frame_count = seg_isoc_frame_count;
  1111. seg->isoc_frame_offset = isoc_frame_offset;
  1112. seg->isoc_size = seg_isoc_size;
  1113. /* iso packet descriptor. */
  1114. seg->isoc_pack_desc_urb =
  1115. usb_alloc_urb(0, GFP_ATOMIC);
  1116. if (seg->isoc_pack_desc_urb == NULL)
  1117. goto error_iso_pack_desc_alloc;
  1118. /*
  1119. * The buffer for the isoc packet descriptor starts
  1120. * after the transfer request header in the
  1121. * segment object memory buffer.
  1122. */
  1123. usb_fill_bulk_urb(
  1124. seg->isoc_pack_desc_urb, usb_dev,
  1125. usb_sndbulkpipe(usb_dev,
  1126. dto_epd->bEndpointAddress),
  1127. (void *)(&seg->xfer_hdr) +
  1128. xfer_hdr_size,
  1129. iso_pkt_descr_size,
  1130. wa_seg_iso_pack_desc_cb, seg);
  1131. /* adjust starting frame offset for next seg. */
  1132. isoc_frame_offset += seg_isoc_frame_count;
  1133. }
  1134. if (xfer->is_inbound == 0 && buf_size > 0) {
  1135. /* outbound data. */
  1136. seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
  1137. if (seg->dto_urb == NULL)
  1138. goto error_dto_alloc;
  1139. usb_fill_bulk_urb(
  1140. seg->dto_urb, usb_dev,
  1141. usb_sndbulkpipe(usb_dev,
  1142. dto_epd->bEndpointAddress),
  1143. NULL, 0, wa_seg_dto_cb, seg);
  1144. if (usb_pipeisoc(xfer->urb->pipe)) {
  1145. /*
  1146. * Fill in the xfer buffer information for the
  1147. * first isoc frame. Subsequent frames in this
  1148. * segment will be filled in and sent from the
  1149. * DTO completion routine, if needed.
  1150. */
  1151. __wa_populate_dto_urb_isoc(xfer, seg,
  1152. seg->isoc_frame_offset);
  1153. } else {
  1154. /* fill in the xfer buffer information. */
  1155. result = __wa_populate_dto_urb(xfer, seg,
  1156. buf_itr, buf_itr_size);
  1157. if (result < 0)
  1158. goto error_seg_outbound_populate;
  1159. buf_itr += buf_itr_size;
  1160. buf_size -= buf_itr_size;
  1161. }
  1162. }
  1163. seg->status = WA_SEG_READY;
  1164. }
  1165. return 0;
  1166. /*
  1167. * Free the memory for the current segment which failed to init.
  1168. * Use the fact that cnt is left at where it failed. The remaining
  1169. * segments will be cleaned up by wa_xfer_destroy.
  1170. */
  1171. error_seg_outbound_populate:
  1172. usb_free_urb(xfer->seg[cnt]->dto_urb);
  1173. error_dto_alloc:
  1174. usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
  1175. error_iso_pack_desc_alloc:
  1176. kfree(xfer->seg[cnt]);
  1177. xfer->seg[cnt] = NULL;
  1178. error_seg_kmalloc:
  1179. error_segs_kzalloc:
  1180. return result;
  1181. }
  1182. /*
  1183. * Allocates all the stuff needed to submit a transfer
  1184. *
  1185. * Breaks the whole data buffer into a list of segments; each one has a
  1186. * structure allocated to it and linked in xfer->seg[index]
  1187. *
  1188. * FIXME: merge setup_segs() and the last part of this function, no
  1189. * need to do two for loops when we could run everything in a
  1190. * single one
  1191. */
  1192. static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
  1193. {
  1194. int result;
  1195. struct device *dev = &xfer->wa->usb_iface->dev;
  1196. enum wa_xfer_type xfer_type = 0; /* shut up GCC */
  1197. size_t xfer_hdr_size, cnt, transfer_size;
  1198. struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
  1199. result = __wa_xfer_setup_sizes(xfer, &xfer_type);
  1200. if (result < 0)
  1201. goto error_setup_sizes;
  1202. xfer_hdr_size = result;
  1203. result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
  1204. if (result < 0) {
  1205. dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
  1206. xfer, xfer->segs, result);
  1207. goto error_setup_segs;
  1208. }
  1209. /* Fill the first header */
  1210. xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
  1211. wa_xfer_id_init(xfer);
  1212. __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
  1213. /* Fill remaining headers */
  1214. xfer_hdr = xfer_hdr0;
  1215. if (xfer_type == WA_XFER_TYPE_ISO) {
  1216. xfer_hdr0->dwTransferLength =
  1217. cpu_to_le32(xfer->seg[0]->isoc_size);
  1218. for (cnt = 1; cnt < xfer->segs; cnt++) {
  1219. struct wa_xfer_packet_info_hwaiso *packet_desc;
  1220. struct wa_seg *seg = xfer->seg[cnt];
  1221. struct wa_xfer_hwaiso *xfer_iso;
  1222. xfer_hdr = &seg->xfer_hdr;
  1223. xfer_iso = container_of(xfer_hdr,
  1224. struct wa_xfer_hwaiso, hdr);
  1225. packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
  1226. /*
  1227. * Copy values from the 0th header. Segment specific
  1228. * values are set below.
  1229. */
  1230. memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
  1231. xfer_hdr->bTransferSegment = cnt;
  1232. xfer_hdr->dwTransferLength =
  1233. cpu_to_le32(seg->isoc_size);
  1234. xfer_iso->dwNumOfPackets =
  1235. cpu_to_le32(seg->isoc_frame_count);
  1236. __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
  1237. seg->status = WA_SEG_READY;
  1238. }
  1239. } else {
  1240. transfer_size = urb->transfer_buffer_length;
  1241. xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
  1242. cpu_to_le32(xfer->seg_size) :
  1243. cpu_to_le32(transfer_size);
  1244. transfer_size -= xfer->seg_size;
  1245. for (cnt = 1; cnt < xfer->segs; cnt++) {
  1246. xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
  1247. memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
  1248. xfer_hdr->bTransferSegment = cnt;
  1249. xfer_hdr->dwTransferLength =
  1250. transfer_size > xfer->seg_size ?
  1251. cpu_to_le32(xfer->seg_size)
  1252. : cpu_to_le32(transfer_size);
  1253. xfer->seg[cnt]->status = WA_SEG_READY;
  1254. transfer_size -= xfer->seg_size;
  1255. }
  1256. }
  1257. xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
  1258. result = 0;
  1259. error_setup_segs:
  1260. error_setup_sizes:
  1261. return result;
  1262. }
1263. /*
1264. * Submit one transfer segment: the transfer request URB, the isoc packet
1265. * descriptor URB (if present) and the DTO URB (if this is an OUT request).
1266. * rpipe->seg_lock is held!
1267. */
  1268. static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
  1269. struct wa_seg *seg, int *dto_done)
  1270. {
  1271. int result;
  1272. /* default to done unless we encounter a multi-frame isoc segment. */
  1273. *dto_done = 1;
  1274. /*
  1275. * Take a ref for each segment urb so the xfer cannot disappear until
  1276. * all of the callbacks run.
  1277. */
  1278. wa_xfer_get(xfer);
  1279. /* submit the transfer request. */
  1280. seg->status = WA_SEG_SUBMITTED;
  1281. result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
  1282. if (result < 0) {
  1283. pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
  1284. __func__, xfer, seg->index, result);
  1285. wa_xfer_put(xfer);
  1286. goto error_tr_submit;
  1287. }
  1288. /* submit the isoc packet descriptor if present. */
  1289. if (seg->isoc_pack_desc_urb) {
  1290. wa_xfer_get(xfer);
  1291. result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
  1292. seg->isoc_frame_index = 0;
  1293. if (result < 0) {
  1294. pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
  1295. __func__, xfer, seg->index, result);
  1296. wa_xfer_put(xfer);
  1297. goto error_iso_pack_desc_submit;
  1298. }
  1299. }
  1300. /* submit the out data if this is an out request. */
  1301. if (seg->dto_urb) {
  1302. struct wahc *wa = xfer->wa;
  1303. wa_xfer_get(xfer);
  1304. result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
  1305. if (result < 0) {
  1306. pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
  1307. __func__, xfer, seg->index, result);
  1308. wa_xfer_put(xfer);
  1309. goto error_dto_submit;
  1310. }
  1311. /*
  1312. * If this segment contains more than one isoc frame, hold
  1313. * onto the dto resource until we send all frames.
  1314. * Only applies to non-Alereon devices.
  1315. */
  1316. if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
  1317. && (seg->isoc_frame_count > 1))
  1318. *dto_done = 0;
  1319. }
  1320. rpipe_avail_dec(rpipe);
  1321. return 0;
  1322. error_dto_submit:
  1323. usb_unlink_urb(seg->isoc_pack_desc_urb);
  1324. error_iso_pack_desc_submit:
  1325. usb_unlink_urb(&seg->tr_urb);
  1326. error_tr_submit:
  1327. seg->status = WA_SEG_ERROR;
  1328. seg->result = result;
  1329. *dto_done = 1;
  1330. return result;
  1331. }
  1332. /*
  1333. * Execute more queued request segments until the maximum concurrent allowed.
  1334. * Return true if the DTO resource was acquired and released.
  1335. *
  1336. * The ugly unlock/lock sequence on the error path is needed as the
1337. * xfer->lock normally nests the seg_lock and not vice versa.
  1338. */
  1339. static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
  1340. {
  1341. int result, dto_acquired = 0, dto_done = 0;
  1342. struct device *dev = &rpipe->wa->usb_iface->dev;
  1343. struct wa_seg *seg;
  1344. struct wa_xfer *xfer;
  1345. unsigned long flags;
  1346. *dto_waiting = 0;
  1347. spin_lock_irqsave(&rpipe->seg_lock, flags);
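/* Submit delayed segments while the RPIPE has request slots free, segments
 * are queued and the DTO resource can be acquired. */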
  1348. while (atomic_read(&rpipe->segs_available) > 0
  1349. && !list_empty(&rpipe->seg_list)
  1350. && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
  1351. seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
  1352. list_node);
  1353. list_del(&seg->list_node);
  1354. xfer = seg->xfer;
  1355. /*
  1356. * Get a reference to the xfer in case the callbacks for the
  1357. * URBs submitted by __wa_seg_submit attempt to complete
  1358. * the xfer before this function completes.
  1359. */
  1360. wa_xfer_get(xfer);
  1361. result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
  1362. /* release the dto resource if this RPIPE is done with it. */
  1363. if (dto_done)
  1364. __wa_dto_put(rpipe->wa);
  1365. dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
  1366. xfer, wa_xfer_id(xfer), seg->index,
  1367. atomic_read(&rpipe->segs_available), result);
  1368. if (unlikely(result < 0)) {
  1369. int done;
  1370. spin_unlock_irqrestore(&rpipe->seg_lock, flags);
  1371. spin_lock_irqsave(&xfer->lock, flags);
  1372. __wa_xfer_abort(xfer);
  1373. /*
  1374. * This seg was marked as submitted when it was put on
  1375. * the RPIPE seg_list. Mark it done.
  1376. */
  1377. xfer->segs_done++;
  1378. done = __wa_xfer_is_done(xfer);
  1379. spin_unlock_irqrestore(&xfer->lock, flags);
  1380. if (done)
  1381. wa_xfer_completion(xfer);
  1382. spin_lock_irqsave(&rpipe->seg_lock, flags);
  1383. }
  1384. wa_xfer_put(xfer);
  1385. }
  1386. /*
  1387. * Mark this RPIPE as waiting if dto was not acquired, there are
  1388. * delayed segs and no active transfers to wake us up later.
  1389. */
  1390. if (!dto_acquired && !list_empty(&rpipe->seg_list)
  1391. && (atomic_read(&rpipe->segs_available) ==
  1392. le16_to_cpu(rpipe->descr.wRequests)))
  1393. *dto_waiting = 1;
  1394. spin_unlock_irqrestore(&rpipe->seg_lock, flags);
  1395. return dto_done;
  1396. }
  1397. static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
  1398. {
  1399. int dto_waiting;
  1400. int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
  1401. /*
  1402. * If this RPIPE is waiting on the DTO resource, add it to the tail of
  1403. * the waiting list.
  1404. * Otherwise, if the WA DTO resource was acquired and released by
  1405. * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
  1406. * DTO and failed during that time. Check the delayed list and process
  1407. * any waiters. Start searching from the next RPIPE index.
  1408. */
  1409. if (dto_waiting)
  1410. wa_add_delayed_rpipe(rpipe->wa, rpipe);
  1411. else if (dto_done)
  1412. wa_check_for_delayed_rpipes(rpipe->wa);
  1413. }
1414. /*
1415. * Submit the segments of a transfer, delaying on the RPIPE's seg_list
1416. * the ones that cannot be sent right away. xfer->lock is taken.
1417. *
1418. * On failure submitting we just stop submitting and return the error;
1419. * wa_urb_enqueue_b() will execute the completion path.
1420. */
  1421. static int __wa_xfer_submit(struct wa_xfer *xfer)
  1422. {
  1423. int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
  1424. struct wahc *wa = xfer->wa;
  1425. struct device *dev = &wa->usb_iface->dev;
  1426. unsigned cnt;
  1427. struct wa_seg *seg;
  1428. unsigned long flags;
  1429. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  1430. size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
  1431. u8 available;
  1432. u8 empty;
  1433. spin_lock_irqsave(&wa->xfer_list_lock, flags);
  1434. list_add_tail(&xfer->list_node, &wa->xfer_list);
  1435. spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
  1436. BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
  1437. result = 0;
  1438. spin_lock_irqsave(&rpipe->seg_lock, flags);
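/* Submit each segment directly while the RPIPE can take it; otherwise queue
 * it on the RPIPE's delayed list for __wa_xfer_delayed_run. */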
  1439. for (cnt = 0; cnt < xfer->segs; cnt++) {
  1440. int delay_seg = 1;
  1441. available = atomic_read(&rpipe->segs_available);
  1442. empty = list_empty(&rpipe->seg_list);
  1443. seg = xfer->seg[cnt];
  1444. if (available && empty) {
  1445. /*
  1446. * Only attempt to acquire DTO if we have a segment
  1447. * to send.
  1448. */
  1449. dto_acquired = __wa_dto_try_get(rpipe->wa);
  1450. if (dto_acquired) {
  1451. delay_seg = 0;
  1452. result = __wa_seg_submit(rpipe, xfer, seg,
  1453. &dto_done);
  1454. dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
  1455. xfer, wa_xfer_id(xfer), cnt, available,
  1456. empty);
  1457. if (dto_done)
  1458. __wa_dto_put(rpipe->wa);
  1459. if (result < 0) {
  1460. __wa_xfer_abort(xfer);
  1461. goto error_seg_submit;
  1462. }
  1463. }
  1464. }
  1465. if (delay_seg) {
  1466. dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
  1467. xfer, wa_xfer_id(xfer), cnt, available, empty);
  1468. seg->status = WA_SEG_DELAYED;
  1469. list_add_tail(&seg->list_node, &rpipe->seg_list);
  1470. }
  1471. xfer->segs_submitted++;
  1472. }
  1473. error_seg_submit:
  1474. /*
  1475. * Mark this RPIPE as waiting if dto was not acquired, there are
  1476. * delayed segs and no active transfers to wake us up later.
  1477. */
  1478. if (!dto_acquired && !list_empty(&rpipe->seg_list)
  1479. && (atomic_read(&rpipe->segs_available) ==
  1480. le16_to_cpu(rpipe->descr.wRequests)))
  1481. dto_waiting = 1;
  1482. spin_unlock_irqrestore(&rpipe->seg_lock, flags);
  1483. if (dto_waiting)
  1484. wa_add_delayed_rpipe(rpipe->wa, rpipe);
  1485. else if (dto_done)
  1486. wa_check_for_delayed_rpipes(rpipe->wa);
  1487. return result;
  1488. }
  1489. /*
1490. * Second part of a URB/transfer enqueue operation
  1491. *
  1492. * Assumes this comes from wa_urb_enqueue() [maybe through
  1493. * wa_urb_enqueue_run()]. At this point:
  1494. *
  1495. * xfer->wa filled and refcounted
  1496. * xfer->ep filled with rpipe refcounted if
  1497. * delayed == 0
  1498. * xfer->urb filled and refcounted (this is the case when called
  1499. * from wa_urb_enqueue() as we come from usb_submit_urb()
  1500. * and when called by wa_urb_enqueue_run(), as we took an
  1501. * extra ref dropped by _run() after we return).
  1502. * xfer->gfp filled
  1503. *
  1504. * If we fail at __wa_xfer_submit(), then we just check if we are done
  1505. * and if so, we run the completion procedure. However, if we are not
  1506. * yet done, we do nothing and wait for the completion handlers from
  1507. * the submitted URBs or from the xfer-result path to kick in. If xfer
  1508. * result never kicks in, the xfer will timeout from the USB code and
  1509. * dequeue() will be called.
  1510. */
  1511. static int wa_urb_enqueue_b(struct wa_xfer *xfer)
  1512. {
  1513. int result;
  1514. unsigned long flags;
  1515. struct urb *urb = xfer->urb;
  1516. struct wahc *wa = xfer->wa;
  1517. struct wusbhc *wusbhc = wa->wusb;
  1518. struct wusb_dev *wusb_dev;
  1519. unsigned done;
  1520. result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
  1521. if (result < 0) {
  1522. pr_err("%s: error_rpipe_get\n", __func__);
  1523. goto error_rpipe_get;
  1524. }
  1525. result = -ENODEV;
  1526. /* FIXME: segmentation broken -- kills DWA */
  1527. mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
  1528. if (urb->dev == NULL) {
  1529. mutex_unlock(&wusbhc->mutex);
  1530. pr_err("%s: error usb dev gone\n", __func__);
  1531. goto error_dev_gone;
  1532. }
  1533. wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
  1534. if (wusb_dev == NULL) {
  1535. mutex_unlock(&wusbhc->mutex);
  1536. dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
  1537. __func__);
  1538. goto error_dev_gone;
  1539. }
  1540. mutex_unlock(&wusbhc->mutex);
  1541. spin_lock_irqsave(&xfer->lock, flags);
  1542. xfer->wusb_dev = wusb_dev;
  1543. result = urb->status;
  1544. if (urb->status != -EINPROGRESS) {
  1545. dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
  1546. goto error_dequeued;
  1547. }
  1548. result = __wa_xfer_setup(xfer, urb);
  1549. if (result < 0) {
  1550. dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
  1551. goto error_xfer_setup;
  1552. }
  1553. /*
  1554. * Get a xfer reference since __wa_xfer_submit starts asynchronous
  1555. * operations that may try to complete the xfer before this function
  1556. * exits.
  1557. */
  1558. wa_xfer_get(xfer);
  1559. result = __wa_xfer_submit(xfer);
  1560. if (result < 0) {
  1561. dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
  1562. goto error_xfer_submit;
  1563. }
  1564. spin_unlock_irqrestore(&xfer->lock, flags);
  1565. wa_xfer_put(xfer);
  1566. return 0;
  1567. /*
1568. * This is basically wa_xfer_completion() broken up: wa_xfer_giveback()
1569. * does a wa_xfer_put() that will call wa_xfer_destroy() and undo the
1570. * setup().
  1571. */
  1572. error_xfer_setup:
  1573. error_dequeued:
  1574. spin_unlock_irqrestore(&xfer->lock, flags);
  1575. /* FIXME: segmentation broken, kills DWA */
  1576. if (wusb_dev)
  1577. wusb_dev_put(wusb_dev);
  1578. error_dev_gone:
  1579. rpipe_put(xfer->ep->hcpriv);
  1580. error_rpipe_get:
  1581. xfer->result = result;
  1582. return result;
  1583. error_xfer_submit:
  1584. done = __wa_xfer_is_done(xfer);
  1585. xfer->result = result;
  1586. spin_unlock_irqrestore(&xfer->lock, flags);
  1587. if (done)
  1588. wa_xfer_completion(xfer);
  1589. wa_xfer_put(xfer);
  1590. /* return success since the completion routine will run. */
  1591. return 0;
  1592. }
  1593. /*
  1594. * Execute the delayed transfers in the Wire Adapter @wa
  1595. *
  1596. * We need to be careful here, as dequeue() could be called in the
  1597. * middle. That's why we do the whole thing under the
  1598. * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1599. * and then checks the list -- since we would be acquiring the locks in
1600. * inverse order, we move the delayed list to a separate list while locked
1601. * and then submit them without the list lock held.
  1602. */
  1603. void wa_urb_enqueue_run(struct work_struct *ws)
  1604. {
  1605. struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
  1606. struct wa_xfer *xfer, *next;
  1607. struct urb *urb;
  1608. LIST_HEAD(tmp_list);
  1609. /* Create a copy of the wa->xfer_delayed_list while holding the lock */
  1610. spin_lock_irq(&wa->xfer_list_lock);
  1611. list_cut_position(&tmp_list, &wa->xfer_delayed_list,
  1612. wa->xfer_delayed_list.prev);
  1613. spin_unlock_irq(&wa->xfer_list_lock);
  1614. /*
  1615. * enqueue from temp list without list lock held since wa_urb_enqueue_b
  1616. * can take xfer->lock as well as lock mutexes.
  1617. */
  1618. list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
  1619. list_del_init(&xfer->list_node);
  1620. urb = xfer->urb;
  1621. if (wa_urb_enqueue_b(xfer) < 0)
  1622. wa_xfer_giveback(xfer);
  1623. usb_put_urb(urb); /* taken when queuing */
  1624. }
  1625. }
  1626. EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
  1627. /*
  1628. * Process the errored transfers on the Wire Adapter outside of interrupt.
  1629. */
  1630. void wa_process_errored_transfers_run(struct work_struct *ws)
  1631. {
  1632. struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
  1633. struct wa_xfer *xfer, *next;
  1634. LIST_HEAD(tmp_list);
  1635. pr_info("%s: Run delayed STALL processing.\n", __func__);
  1636. /* Create a copy of the wa->xfer_errored_list while holding the lock */
  1637. spin_lock_irq(&wa->xfer_list_lock);
  1638. list_cut_position(&tmp_list, &wa->xfer_errored_list,
  1639. wa->xfer_errored_list.prev);
  1640. spin_unlock_irq(&wa->xfer_list_lock);
  1641. /*
  1642. * run rpipe_clear_feature_stalled from temp list without list lock
  1643. * held.
  1644. */
  1645. list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
  1646. struct usb_host_endpoint *ep;
  1647. unsigned long flags;
  1648. struct wa_rpipe *rpipe;
  1649. spin_lock_irqsave(&xfer->lock, flags);
  1650. ep = xfer->ep;
  1651. rpipe = ep->hcpriv;
  1652. spin_unlock_irqrestore(&xfer->lock, flags);
  1653. /* clear RPIPE feature stalled without holding a lock. */
  1654. rpipe_clear_feature_stalled(wa, ep);
  1655. /* complete the xfer. This removes it from the tmp list. */
  1656. wa_xfer_completion(xfer);
  1657. /* check for work. */
  1658. wa_xfer_delayed_run(rpipe);
  1659. }
  1660. }
  1661. EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
  1662. /*
  1663. * Submit a transfer to the Wire Adapter in a delayed way
  1664. *
1665. * The process of enqueuing involves possible sleeps [see
1666. * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are in
1667. * an atomic section, we defer the enqueue_b() call; else we call it directly.
1668. *
1669. * @urb: We own a reference to it, held by the Linux USB HCD stack, that
1670. * will be given up by calling usb_hcd_giveback_urb() or by
1671. * returning an error from this function, so we don't have to
1672. * refcount it.
  1673. */
  1674. int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
  1675. struct urb *urb, gfp_t gfp)
  1676. {
  1677. int result;
  1678. struct device *dev = &wa->usb_iface->dev;
  1679. struct wa_xfer *xfer;
  1680. unsigned long my_flags;
  1681. unsigned cant_sleep = irqs_disabled() | in_atomic();
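/* If we cannot sleep in this context, defer the enqueue to the
 * xfer_enqueue_work work queue below. */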
  1682. if ((urb->transfer_buffer == NULL)
  1683. && (urb->sg == NULL)
  1684. && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  1685. && urb->transfer_buffer_length != 0) {
  1686. dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
  1687. dump_stack();
  1688. }
  1689. spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
  1690. result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
  1691. spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
  1692. if (result < 0)
  1693. goto error_link_urb;
  1694. result = -ENOMEM;
  1695. xfer = kzalloc(sizeof(*xfer), gfp);
  1696. if (xfer == NULL)
  1697. goto error_kmalloc;
  1698. result = -ENOENT;
  1699. if (urb->status != -EINPROGRESS) /* cancelled */
  1700. goto error_dequeued; /* before starting? */
  1701. wa_xfer_init(xfer);
  1702. xfer->wa = wa_get(wa);
  1703. xfer->urb = urb;
  1704. xfer->gfp = gfp;
  1705. xfer->ep = ep;
  1706. urb->hcpriv = xfer;
  1707. dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
  1708. xfer, urb, urb->pipe, urb->transfer_buffer_length,
  1709. urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
  1710. urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
  1711. cant_sleep ? "deferred" : "inline");
  1712. if (cant_sleep) {
  1713. usb_get_urb(urb);
  1714. spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
  1715. list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
  1716. spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
  1717. queue_work(wusbd, &wa->xfer_enqueue_work);
  1718. } else {
  1719. result = wa_urb_enqueue_b(xfer);
  1720. if (result < 0) {
  1721. /*
  1722. * URB submit/enqueue failed. Clean up, return an
  1723. * error and do not run the callback. This avoids
  1724. * an infinite submit/complete loop.
  1725. */
  1726. dev_err(dev, "%s: URB enqueue failed: %d\n",
  1727. __func__, result);
  1728. wa_put(xfer->wa);
  1729. wa_xfer_put(xfer);
  1730. spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
  1731. usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
  1732. spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
  1733. return result;
  1734. }
  1735. }
  1736. return 0;
  1737. error_dequeued:
  1738. kfree(xfer);
  1739. error_kmalloc:
  1740. spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
  1741. usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
  1742. spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
  1743. error_link_urb:
  1744. return result;
  1745. }
  1746. EXPORT_SYMBOL_GPL(wa_urb_enqueue);
  1747. /*
1748. * Dequeue a URB and make sure usb_hcd_giveback_urb() [the completion
1749. * handler] is called.
1750. *
1751. * Until a transfer has gone successfully through wa_urb_enqueue() it
1752. * needs to be dequeued with the completion called; when stuck in the
1753. * delayed list or before wa_xfer_setup() is called, we do the completion.
1754. *
1755. * not setup: If there is no hcpriv yet, that means that enqueue
1756. * still had no time to set the xfer up. Because
1757. * urb->status should be other than -EINPROGRESS,
1758. * enqueue() will catch that and bail out.
  1759. *
  1760. * If the transfer has gone through setup, we just need to clean it
  1761. * up. If it has gone through submit(), we have to abort it [with an
  1762. * asynch request] and then make sure we cancel each segment.
  1763. *
  1764. */
  1765. int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
  1766. {
  1767. unsigned long flags;
  1768. struct wa_xfer *xfer;
  1769. struct wa_seg *seg;
  1770. struct wa_rpipe *rpipe;
  1771. unsigned cnt, done = 0, xfer_abort_pending;
  1772. unsigned rpipe_ready = 0;
  1773. int result;
  1774. /* check if it is safe to unlink. */
  1775. spin_lock_irqsave(&wa->xfer_list_lock, flags);
  1776. result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
  1777. if ((result == 0) && urb->hcpriv) {
  1778. /*
  1779. * Get a xfer ref to prevent a race with wa_xfer_giveback
  1780. * cleaning up the xfer while we are working with it.
  1781. */
  1782. wa_xfer_get(urb->hcpriv);
  1783. }
  1784. spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
  1785. if (result)
  1786. return result;
  1787. xfer = urb->hcpriv;
  1788. if (xfer == NULL)
  1789. return -ENOENT;
  1790. spin_lock_irqsave(&xfer->lock, flags);
  1791. pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
  1792. rpipe = xfer->ep->hcpriv;
  1793. if (rpipe == NULL) {
  1794. pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s",
  1795. __func__, xfer, wa_xfer_id(xfer),
  1796. "Probably already aborted.\n" );
  1797. result = -ENOENT;
  1798. goto out_unlock;
  1799. }
  1800. /*
  1801. * Check for done to avoid racing with wa_xfer_giveback and completing
  1802. * twice.
  1803. */
  1804. if (__wa_xfer_is_done(xfer)) {
  1805. pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
  1806. xfer, wa_xfer_id(xfer));
  1807. result = -ENOENT;
  1808. goto out_unlock;
  1809. }
  1810. /* Check the delayed list -> if there, release and complete */
  1811. spin_lock(&wa->xfer_list_lock);
  1812. if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
  1813. goto dequeue_delayed;
  1814. spin_unlock(&wa->xfer_list_lock);
  1815. if (xfer->seg == NULL) /* still hasn't reached */
  1816. goto out_unlock; /* setup(), enqueue_b() completes */
  1817. /* Ok, the xfer is in flight already, it's been setup and submitted.*/
  1818. xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
  1819. /*
  1820. * grab the rpipe->seg_lock here to prevent racing with
  1821. * __wa_xfer_delayed_run.
  1822. */
  1823. spin_lock(&rpipe->seg_lock);
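/* Walk every segment and resolve the ones the HWA will no longer report on;
 * submitted/pending segments are left to the abort/DTI result path. */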
  1824. for (cnt = 0; cnt < xfer->segs; cnt++) {
  1825. seg = xfer->seg[cnt];
  1826. pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
  1827. __func__, wa_xfer_id(xfer), cnt, seg->status);
  1828. switch (seg->status) {
  1829. case WA_SEG_NOTREADY:
  1830. case WA_SEG_READY:
  1831. printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
  1832. xfer, cnt, seg->status);
  1833. WARN_ON(1);
  1834. break;
  1835. case WA_SEG_DELAYED:
  1836. /*
  1837. * delete from rpipe delayed list. If no segments on
  1838. * this xfer have been submitted, __wa_xfer_is_done will
  1839. * trigger a giveback below. Otherwise, the submitted
  1840. * segments will be completed in the DTI interrupt.
  1841. */
  1842. seg->status = WA_SEG_ABORTED;
  1843. seg->result = -ENOENT;
  1844. list_del(&seg->list_node);
  1845. xfer->segs_done++;
  1846. break;
  1847. case WA_SEG_DONE:
  1848. case WA_SEG_ERROR:
  1849. case WA_SEG_ABORTED:
  1850. break;
  1851. /*
  1852. * The buf_in data for a segment in the
  1853. * WA_SEG_DTI_PENDING state is actively being read.
  1854. * Let wa_buf_in_cb handle it since it will be called
  1855. * and will increment xfer->segs_done. Cleaning up
  1856. * here could cause wa_buf_in_cb to access the xfer
  1857. * after it has been completed/freed.
  1858. */
  1859. case WA_SEG_DTI_PENDING:
  1860. break;
  1861. /*
  1862. * In the states below, the HWA device already knows
  1863. * about the transfer. If an abort request was sent,
  1864. * allow the HWA to process it and wait for the
  1865. * results. Otherwise, the DTI state and seg completed
  1866. * counts can get out of sync.
  1867. */
  1868. case WA_SEG_SUBMITTED:
  1869. case WA_SEG_PENDING:
  1870. /*
  1871. * Check if the abort was successfully sent. This could
  1872. * be false if the HWA has been removed but we haven't
  1873. * gotten the disconnect notification yet.
  1874. */
  1875. if (!xfer_abort_pending) {
  1876. seg->status = WA_SEG_ABORTED;
  1877. rpipe_ready = rpipe_avail_inc(rpipe);
  1878. xfer->segs_done++;
  1879. }
  1880. break;
  1881. }
  1882. }
  1883. spin_unlock(&rpipe->seg_lock);
  1884. xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
  1885. done = __wa_xfer_is_done(xfer);
  1886. spin_unlock_irqrestore(&xfer->lock, flags);
  1887. if (done)
  1888. wa_xfer_completion(xfer);
  1889. if (rpipe_ready)
  1890. wa_xfer_delayed_run(rpipe);
  1891. wa_xfer_put(xfer);
  1892. return result;
  1893. out_unlock:
  1894. spin_unlock_irqrestore(&xfer->lock, flags);
  1895. wa_xfer_put(xfer);
  1896. return result;
  1897. dequeue_delayed:
  1898. list_del_init(&xfer->list_node);
  1899. spin_unlock(&wa->xfer_list_lock);
  1900. xfer->result = urb->status;
  1901. spin_unlock_irqrestore(&xfer->lock, flags);
  1902. wa_xfer_giveback(xfer);
  1903. wa_xfer_put(xfer);
  1904. usb_put_urb(urb); /* we got a ref in enqueue() */
  1905. return 0;
  1906. }
  1907. EXPORT_SYMBOL_GPL(wa_urb_dequeue);
  1908. /*
  1909. * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
  1910. * codes
  1911. *
  1912. * Positive errno values are internal inconsistencies and should be
  1913. * flagged louder. Negative are to be passed up to the user in the
  1914. * normal way.
  1915. *
  1916. * @status: USB WA status code -- high two bits are stripped.
  1917. */
  1918. static int wa_xfer_status_to_errno(u8 status)
  1919. {
  1920. int errno;
  1921. u8 real_status = status;
  1922. static int xlat[] = {
  1923. [WA_XFER_STATUS_SUCCESS] = 0,
  1924. [WA_XFER_STATUS_HALTED] = -EPIPE,
  1925. [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
  1926. [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
  1927. [WA_XFER_RESERVED] = EINVAL,
  1928. [WA_XFER_STATUS_NOT_FOUND] = 0,
  1929. [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
  1930. [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
  1931. [WA_XFER_STATUS_ABORTED] = -ENOENT,
  1932. [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
  1933. [WA_XFER_INVALID_FORMAT] = EINVAL,
  1934. [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
  1935. [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
  1936. };
  1937. status &= 0x3f;
  1938. if (status == 0)
  1939. return 0;
  1940. if (status >= ARRAY_SIZE(xlat)) {
  1941. printk_ratelimited(KERN_ERR "%s(): BUG? "
  1942. "Unknown WA transfer status 0x%02x\n",
  1943. __func__, real_status);
  1944. return -EINVAL;
  1945. }
  1946. errno = xlat[status];
  1947. if (unlikely(errno > 0)) {
  1948. printk_ratelimited(KERN_ERR "%s(): BUG? "
  1949. "Inconsistent WA status: 0x%02x\n",
  1950. __func__, real_status);
  1951. errno = -errno;
  1952. }
  1953. return errno;
  1954. }
  1955. /*
  1956. * If a last segment flag and/or a transfer result error is encountered,
  1957. * no other segment transfer results will be returned from the device.
  1958. * Mark the remaining submitted or pending xfers as completed so that
  1959. * the xfer will complete cleanly.
  1960. *
  1961. * xfer->lock must be held
  1962. *
  1963. */
  1964. static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
  1965. int starting_index, enum wa_seg_status status)
  1966. {
  1967. int index;
  1968. struct wa_rpipe *rpipe = xfer->ep->hcpriv;
  1969. for (index = starting_index; index < xfer->segs_submitted; index++) {
  1970. struct wa_seg *current_seg = xfer->seg[index];
  1971. BUG_ON(current_seg == NULL);
  1972. switch (current_seg->status) {
  1973. case WA_SEG_SUBMITTED:
  1974. case WA_SEG_PENDING:
  1975. case WA_SEG_DTI_PENDING:
  1976. rpipe_avail_inc(rpipe);
  1977. /*
  1978. * do not increment RPIPE avail for the WA_SEG_DELAYED case
  1979. * since it has not been submitted to the RPIPE.
  1980. */
  1981. /* fall through */
  1982. case WA_SEG_DELAYED:
  1983. xfer->segs_done++;
  1984. current_seg->status = status;
  1985. break;
  1986. case WA_SEG_ABORTED:
  1987. break;
  1988. default:
  1989. WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
  1990. __func__, wa_xfer_id(xfer), index,
  1991. current_seg->status);
  1992. break;
  1993. }
  1994. }
  1995. }
  1996. /* Populate the given urb based on the current isoc transfer state. */
  1997. static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
  1998. struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
  1999. {
  2000. int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
  2001. int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
  2002. struct usb_iso_packet_descriptor *iso_frame_desc =
  2003. xfer->urb->iso_frame_desc;
  2004. const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
  2005. int next_frame_contiguous;
  2006. struct usb_iso_packet_descriptor *iso_frame;
  2007. BUG_ON(buf_in_urb->status == -EINPROGRESS);
  2008. /*
  2009. * If the current frame actual_length is contiguous with the next frame
  2010. * and actual_length is a multiple of the DTI endpoint max packet size,
  2011. * combine the current frame with the next frame in a single URB. This
  2012. * reduces the number of URBs that must be submitted in that case.
  2013. */
  2014. seg_index = seg->isoc_frame_index;
  2015. do {
  2016. next_frame_contiguous = 0;
  2017. iso_frame = &iso_frame_desc[urb_frame_index];
  2018. total_len += iso_frame->actual_length;
  2019. ++urb_frame_index;
  2020. ++seg_index;
  2021. if (seg_index < seg->isoc_frame_count) {
  2022. struct usb_iso_packet_descriptor *next_iso_frame;
  2023. next_iso_frame = &iso_frame_desc[urb_frame_index];
  2024. if ((iso_frame->offset + iso_frame->actual_length) ==
  2025. next_iso_frame->offset)
  2026. next_frame_contiguous = 1;
  2027. }
  2028. } while (next_frame_contiguous
  2029. && ((iso_frame->actual_length % dti_packet_size) == 0));
  2030. /* this should always be 0 before a resubmit. */
  2031. buf_in_urb->num_mapped_sgs = 0;
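/* Read straight into the client URB's DMA buffer at the starting frame's offset. */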
  2032. buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
  2033. iso_frame_desc[urb_start_frame].offset;
  2034. buf_in_urb->transfer_buffer_length = total_len;
  2035. buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
  2036. buf_in_urb->transfer_buffer = NULL;
  2037. buf_in_urb->sg = NULL;
  2038. buf_in_urb->num_sgs = 0;
  2039. buf_in_urb->context = seg;
  2040. /* return the number of frames included in this URB. */
  2041. return seg_index - seg->isoc_frame_index;
  2042. }
  2043. /* Populate the given urb based on the current transfer state. */
  2044. static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
  2045. unsigned int seg_idx, unsigned int bytes_transferred)
  2046. {
  2047. int result = 0;
  2048. struct wa_seg *seg = xfer->seg[seg_idx];
  2049. BUG_ON(buf_in_urb->status == -EINPROGRESS);
  2050. /* this should always be 0 before a resubmit. */
  2051. buf_in_urb->num_mapped_sgs = 0;
  2052. if (xfer->is_dma) {
  2053. buf_in_urb->transfer_dma = xfer->urb->transfer_dma
  2054. + (seg_idx * xfer->seg_size);
  2055. buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
  2056. buf_in_urb->transfer_buffer = NULL;
  2057. buf_in_urb->sg = NULL;
  2058. buf_in_urb->num_sgs = 0;
  2059. } else {
  2060. /* do buffer or SG processing. */
  2061. buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
  2062. if (xfer->urb->transfer_buffer) {
  2063. buf_in_urb->transfer_buffer =
  2064. xfer->urb->transfer_buffer
  2065. + (seg_idx * xfer->seg_size);
  2066. buf_in_urb->sg = NULL;
  2067. buf_in_urb->num_sgs = 0;
  2068. } else {
  2069. /* allocate an SG list to store seg_size bytes
  2070. and copy the subset of the xfer->urb->sg
  2071. that matches the buffer subset we are
  2072. about to read. */
  2073. buf_in_urb->sg = wa_xfer_create_subset_sg(
  2074. xfer->urb->sg,
  2075. seg_idx * xfer->seg_size,
  2076. bytes_transferred,
  2077. &(buf_in_urb->num_sgs));
  2078. if (!(buf_in_urb->sg)) {
  2079. buf_in_urb->num_sgs = 0;
  2080. result = -ENOMEM;
  2081. }
  2082. buf_in_urb->transfer_buffer = NULL;
  2083. }
  2084. }
  2085. buf_in_urb->transfer_buffer_length = bytes_transferred;
  2086. buf_in_urb->context = seg;
  2087. return result;
  2088. }
  2089. /*
  2090. * Process a xfer result completion message
  2091. *
  2092. * inbound transfers: need to schedule a buf_in_urb read
  2093. *
  2094. * FIXME: this function needs to be broken up in parts
  2095. */
  2096. static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
  2097. struct wa_xfer_result *xfer_result)
  2098. {
  2099. int result;
  2100. struct device *dev = &wa->usb_iface->dev;
  2101. unsigned long flags;
  2102. unsigned int seg_idx;
  2103. struct wa_seg *seg;
  2104. struct wa_rpipe *rpipe;
  2105. unsigned done = 0;
  2106. u8 usb_status;
  2107. unsigned rpipe_ready = 0;
  2108. unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
  2109. struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
  2110. spin_lock_irqsave(&xfer->lock, flags);
  2111. seg_idx = xfer_result->bTransferSegment & 0x7f;
  2112. if (unlikely(seg_idx >= xfer->segs))
  2113. goto error_bad_seg;
  2114. seg = xfer->seg[seg_idx];
  2115. rpipe = xfer->ep->hcpriv;
  2116. usb_status = xfer_result->bTransferStatus;
  2117. dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
  2118. xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
  2119. if (seg->status == WA_SEG_ABORTED
  2120. || seg->status == WA_SEG_ERROR) /* already handled */
  2121. goto segment_aborted;
2122. if (seg->status == WA_SEG_SUBMITTED) /* oops, got here */
  2123. seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
  2124. if (seg->status != WA_SEG_PENDING) {
  2125. if (printk_ratelimit())
  2126. dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
  2127. xfer, seg_idx, seg->status);
  2128. seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
  2129. }
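/* Bit 7 of bTransferStatus marks a failed segment; bit 6 is only a warning. */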
  2130. if (usb_status & 0x80) {
  2131. seg->result = wa_xfer_status_to_errno(usb_status);
  2132. dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
  2133. xfer, xfer->id, seg->index, usb_status);
  2134. seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
  2135. WA_SEG_ABORTED : WA_SEG_ERROR;
  2136. goto error_complete;
  2137. }
  2138. /* FIXME: we ignore warnings, tally them for stats */
  2139. if (usb_status & 0x40) /* Warning?... */
  2140. usb_status = 0; /* ... pass */
  2141. /*
  2142. * If the last segment bit is set, complete the remaining segments.
  2143. * When the current segment is completed, either in wa_buf_in_cb for
  2144. * transfers with data or below for no data, the xfer will complete.
  2145. */
  2146. if (xfer_result->bTransferSegment & 0x80)
  2147. wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
  2148. WA_SEG_DONE);
  2149. if (usb_pipeisoc(xfer->urb->pipe)
  2150. && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
  2151. /* set up WA state to read the isoc packet status next. */
  2152. wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
  2153. wa->dti_isoc_xfer_seg = seg_idx;
  2154. wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
  2155. } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
  2156. && (bytes_transferred > 0)) {
  2157. /* IN data phase: read to buffer */
  2158. seg->status = WA_SEG_DTI_PENDING;
  2159. result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
  2160. bytes_transferred);
  2161. if (result < 0)
  2162. goto error_buf_in_populate;
  2163. ++(wa->active_buf_in_urbs);
  2164. result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
  2165. if (result < 0) {
  2166. --(wa->active_buf_in_urbs);
  2167. goto error_submit_buf_in;
  2168. }
  2169. } else {
  2170. /* OUT data phase or no data, complete it -- */
  2171. seg->result = bytes_transferred;
  2172. rpipe_ready = rpipe_avail_inc(rpipe);
  2173. done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
  2174. }
  2175. spin_unlock_irqrestore(&xfer->lock, flags);
  2176. if (done)
  2177. wa_xfer_completion(xfer);
  2178. if (rpipe_ready)
  2179. wa_xfer_delayed_run(rpipe);
  2180. return;
  2181. error_submit_buf_in:
  2182. if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
  2183. dev_err(dev, "DTI: URB max acceptable errors "
  2184. "exceeded, resetting device\n");
  2185. wa_reset_all(wa);
  2186. }
  2187. if (printk_ratelimit())
  2188. dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
  2189. xfer, seg_idx, result);
  2190. seg->result = result;
  2191. kfree(buf_in_urb->sg);
  2192. buf_in_urb->sg = NULL;
  2193. error_buf_in_populate:
  2194. __wa_xfer_abort(xfer);
  2195. seg->status = WA_SEG_ERROR;
  2196. error_complete:
  2197. xfer->segs_done++;
  2198. rpipe_ready = rpipe_avail_inc(rpipe);
  2199. wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
  2200. done = __wa_xfer_is_done(xfer);
  2201. /*
  2202. * queue work item to clear STALL for control endpoints.
  2203. * Otherwise, let endpoint_reset take care of it.
  2204. */
  2205. if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
  2206. usb_endpoint_xfer_control(&xfer->ep->desc) &&
  2207. done) {
  2208. dev_info(dev, "Control EP stall. Queue delayed work.\n");
  2209. spin_lock(&wa->xfer_list_lock);
  2210. /* move xfer from xfer_list to xfer_errored_list. */
  2211. list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
  2212. spin_unlock(&wa->xfer_list_lock);
  2213. spin_unlock_irqrestore(&xfer->lock, flags);
  2214. queue_work(wusbd, &wa->xfer_error_work);
  2215. } else {
  2216. spin_unlock_irqrestore(&xfer->lock, flags);
  2217. if (done)
  2218. wa_xfer_completion(xfer);
  2219. if (rpipe_ready)
  2220. wa_xfer_delayed_run(rpipe);
  2221. }
  2222. return;
  2223. error_bad_seg:
  2224. spin_unlock_irqrestore(&xfer->lock, flags);
  2225. wa_urb_dequeue(wa, xfer->urb, -ENOENT);
  2226. if (printk_ratelimit())
  2227. dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
  2228. if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
  2229. dev_err(dev, "DTI: URB max acceptable errors "
  2230. "exceeded, resetting device\n");
  2231. wa_reset_all(wa);
  2232. }
  2233. return;
  2234. segment_aborted:
  2235. /* nothing to do, as the aborter did the completion */
  2236. spin_unlock_irqrestore(&xfer->lock, flags);
  2237. }
  2238. /*
2239. * Process an isochronous packet status message
  2240. *
  2241. * inbound transfers: need to schedule a buf_in_urb read
  2242. */
  2243. static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
  2244. {
  2245. struct device *dev = &wa->usb_iface->dev;
  2246. struct wa_xfer_packet_status_hwaiso *packet_status;
  2247. struct wa_xfer_packet_status_len_hwaiso *status_array;
  2248. struct wa_xfer *xfer;
  2249. unsigned long flags;
  2250. struct wa_seg *seg;
  2251. struct wa_rpipe *rpipe;
  2252. unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
  2253. unsigned first_frame_index = 0, rpipe_ready = 0;
  2254. int expected_size;
2255. /* We have an isoc packet status buffer; check it */
  2256. dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
  2257. urb->actual_length, urb->transfer_buffer);
  2258. packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
  2259. if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
  2260. dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
  2261. packet_status->bPacketType);
  2262. goto error_parse_buffer;
  2263. }
  2264. xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
  2265. if (xfer == NULL) {
  2266. dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
  2267. wa->dti_isoc_xfer_in_progress);
  2268. goto error_parse_buffer;
  2269. }
  2270. spin_lock_irqsave(&xfer->lock, flags);
  2271. if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
  2272. goto error_bad_seg;
  2273. seg = xfer->seg[wa->dti_isoc_xfer_seg];
  2274. rpipe = xfer->ep->hcpriv;
  2275. expected_size = sizeof(*packet_status) +
  2276. (sizeof(packet_status->PacketStatus[0]) *
  2277. seg->isoc_frame_count);
  2278. if (urb->actual_length != expected_size) {
  2279. dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
  2280. urb->actual_length, expected_size);
  2281. goto error_bad_seg;
  2282. }
  2283. if (le16_to_cpu(packet_status->wLength) != expected_size) {
  2284. dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
  2285. le16_to_cpu(packet_status->wLength));
  2286. goto error_bad_seg;
  2287. }
  2288. /* write isoc packet status and lengths back to the xfer urb. */
  2289. status_array = packet_status->PacketStatus;
  2290. xfer->urb->start_frame =
  2291. wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
  2292. for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
  2293. struct usb_iso_packet_descriptor *iso_frame_desc =
  2294. xfer->urb->iso_frame_desc;
  2295. const int xfer_frame_index =
  2296. seg->isoc_frame_offset + seg_index;
  2297. iso_frame_desc[xfer_frame_index].status =
  2298. wa_xfer_status_to_errno(
  2299. le16_to_cpu(status_array[seg_index].PacketStatus));
  2300. iso_frame_desc[xfer_frame_index].actual_length =
  2301. le16_to_cpu(status_array[seg_index].PacketLength);
  2302. /* track the number of frames successfully transferred. */
  2303. if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
  2304. /* save the starting frame index for buf_in_urb. */
  2305. if (!data_frame_count)
  2306. first_frame_index = seg_index;
  2307. ++data_frame_count;
  2308. }
  2309. }
  2310. if (xfer->is_inbound && data_frame_count) {
  2311. int result, total_frames_read = 0, urb_index = 0;
  2312. struct urb *buf_in_urb;
  2313. /* IN data phase: read to buffer */
  2314. seg->status = WA_SEG_DTI_PENDING;
  2315. /* start with the first frame with data. */
  2316. seg->isoc_frame_index = first_frame_index;
  2317. /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
  2318. do {
  2319. int urb_frame_index, urb_frame_count;
  2320. struct usb_iso_packet_descriptor *iso_frame_desc;
  2321. buf_in_urb = &(wa->buf_in_urbs[urb_index]);
  2322. urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
  2323. buf_in_urb, xfer, seg);
  2324. /* advance frame index to start of next read URB. */
  2325. seg->isoc_frame_index += urb_frame_count;
  2326. total_frames_read += urb_frame_count;
  2327. ++(wa->active_buf_in_urbs);
  2328. result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
  2329. /* skip 0-byte frames. */
  2330. urb_frame_index =
  2331. seg->isoc_frame_offset + seg->isoc_frame_index;
  2332. iso_frame_desc =
  2333. &(xfer->urb->iso_frame_desc[urb_frame_index]);
  2334. while ((seg->isoc_frame_index <
  2335. seg->isoc_frame_count) &&
  2336. (iso_frame_desc->actual_length == 0)) {
  2337. ++(seg->isoc_frame_index);
  2338. ++iso_frame_desc;
  2339. }
  2340. ++urb_index;
  2341. } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
  2342. && (seg->isoc_frame_index <
  2343. seg->isoc_frame_count));
  2344. if (result < 0) {
  2345. --(wa->active_buf_in_urbs);
  2346. dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
  2347. result);
  2348. wa_reset_all(wa);
  2349. } else if (data_frame_count > total_frames_read)
  2350. /* If we need to read more frames, set DTI busy. */
  2351. dti_busy = 1;
  2352. } else {
  2353. /* OUT transfer or no more IN data, complete it -- */
  2354. rpipe_ready = rpipe_avail_inc(rpipe);
  2355. done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
  2356. }
  2357. spin_unlock_irqrestore(&xfer->lock, flags);
  2358. if (dti_busy)
  2359. wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
  2360. else
  2361. wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
  2362. if (done)
  2363. wa_xfer_completion(xfer);
  2364. if (rpipe_ready)
  2365. wa_xfer_delayed_run(rpipe);
  2366. wa_xfer_put(xfer);
  2367. return dti_busy;
  2368. error_bad_seg:
  2369. spin_unlock_irqrestore(&xfer->lock, flags);
  2370. wa_xfer_put(xfer);
  2371. error_parse_buffer:
  2372. return dti_busy;
  2373. }
  2374. /*
  2375. * Callback for the IN data phase
  2376. *
2377. * If successful, transition state; otherwise, take note of the
  2378. * error, mark this segment done and try completion.
  2379. *
  2380. * Note we don't access until we are sure that the transfer hasn't
  2381. * been cancelled (ECONNRESET, ENOENT), which could mean that
  2382. * seg->xfer could be already gone.
  2383. */
  2384. static void wa_buf_in_cb(struct urb *urb)
  2385. {
  2386. struct wa_seg *seg = urb->context;
  2387. struct wa_xfer *xfer = seg->xfer;
  2388. struct wahc *wa;
  2389. struct device *dev;
  2390. struct wa_rpipe *rpipe;
  2391. unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
  2392. unsigned long flags;
  2393. int resubmit_dti = 0, active_buf_in_urbs;
  2394. u8 done = 0;
  2395. /* free the sg if it was used. */
  2396. kfree(urb->sg);
  2397. urb->sg = NULL;
  2398. spin_lock_irqsave(&xfer->lock, flags);
  2399. wa = xfer->wa;
  2400. dev = &wa->usb_iface->dev;
  2401. --(wa->active_buf_in_urbs);
  2402. active_buf_in_urbs = wa->active_buf_in_urbs;
  2403. rpipe = xfer->ep->hcpriv;
  2404. if (usb_pipeisoc(xfer->urb->pipe)) {
  2405. struct usb_iso_packet_descriptor *iso_frame_desc =
  2406. xfer->urb->iso_frame_desc;
  2407. int seg_index;
  2408. /*
  2409. * Find the next isoc frame with data and count how many
  2410. * frames with data remain.
  2411. */
  2412. seg_index = seg->isoc_frame_index;
  2413. while (seg_index < seg->isoc_frame_count) {
  2414. const int urb_frame_index =
  2415. seg->isoc_frame_offset + seg_index;
  2416. if (iso_frame_desc[urb_frame_index].actual_length > 0) {
  2417. /* save the index of the next frame with data */
  2418. if (!isoc_data_frame_count)
  2419. seg->isoc_frame_index = seg_index;
  2420. ++isoc_data_frame_count;
  2421. }
  2422. ++seg_index;
  2423. }
  2424. }
  2425. spin_unlock_irqrestore(&xfer->lock, flags);
  2426. switch (urb->status) {
  2427. case 0:
  2428. spin_lock_irqsave(&xfer->lock, flags);
  2429. seg->result += urb->actual_length;
  2430. if (isoc_data_frame_count > 0) {
  2431. int result, urb_frame_count;
  2432. /* submit a read URB for the next frame with data. */
  2433. urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
  2434. xfer, seg);
  2435. /* advance index to start of next read URB. */
  2436. seg->isoc_frame_index += urb_frame_count;
  2437. ++(wa->active_buf_in_urbs);
  2438. result = usb_submit_urb(urb, GFP_ATOMIC);
  2439. if (result < 0) {
  2440. --(wa->active_buf_in_urbs);
  2441. dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
  2442. result);
  2443. wa_reset_all(wa);
  2444. }
  2445. /*
  2446. * If we are in this callback and
  2447. * isoc_data_frame_count > 0, it means that the dti_urb
  2448. * submission was delayed in wa_dti_cb. Once
  2449. * we submit the last buf_in_urb, we can submit the
  2450. * delayed dti_urb.
  2451. */
  2452. resubmit_dti = (isoc_data_frame_count ==
  2453. urb_frame_count);
  2454. } else if (active_buf_in_urbs == 0) {
  2455. dev_dbg(dev,
  2456. "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
  2457. xfer, wa_xfer_id(xfer), seg->index,
  2458. seg->result);
  2459. rpipe_ready = rpipe_avail_inc(rpipe);
  2460. done = __wa_xfer_mark_seg_as_done(xfer, seg,
  2461. WA_SEG_DONE);
  2462. }
  2463. spin_unlock_irqrestore(&xfer->lock, flags);
  2464. if (done)
  2465. wa_xfer_completion(xfer);
  2466. if (rpipe_ready)
  2467. wa_xfer_delayed_run(rpipe);
  2468. break;
  2469. case -ECONNRESET: /* URB unlinked; no need to do anything */
  2470. case -ENOENT: /* as it was done by the who unlinked us */
  2471. break;
  2472. default: /* Other errors ... */
  2473. /*
  2474. * Error on data buf read. Only resubmit DTI if it hasn't
  2475. * already been done by previously hitting this error or by a
  2476. * successful completion of the previous buf_in_urb.
  2477. */
  2478. resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
  2479. spin_lock_irqsave(&xfer->lock, flags);
  2480. if (printk_ratelimit())
  2481. dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
  2482. xfer, wa_xfer_id(xfer), seg->index,
  2483. urb->status);
  2484. if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
  2485. EDC_ERROR_TIMEFRAME)){
  2486. dev_err(dev, "DTO: URB max acceptable errors "
  2487. "exceeded, resetting device\n");
  2488. wa_reset_all(wa);
  2489. }
  2490. seg->result = urb->status;
  2491. rpipe_ready = rpipe_avail_inc(rpipe);
  2492. if (active_buf_in_urbs == 0)
  2493. done = __wa_xfer_mark_seg_as_done(xfer, seg,
  2494. WA_SEG_ERROR);
  2495. else
  2496. __wa_xfer_abort(xfer);
  2497. spin_unlock_irqrestore(&xfer->lock, flags);
  2498. if (done)
  2499. wa_xfer_completion(xfer);
  2500. if (rpipe_ready)
  2501. wa_xfer_delayed_run(rpipe);
  2502. }
  2503. if (resubmit_dti) {
  2504. int result;
  2505. wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
  2506. result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
  2507. if (result < 0) {
  2508. dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
  2509. result);
  2510. wa_reset_all(wa);
  2511. }
  2512. }
  2513. }
  2514. /*
  2515. * Handle an incoming transfer result buffer
  2516. *
  2517. * Given a transfer result buffer, it completes the transfer (possibly
2518. * scheduling a buffer-in read) and then resubmits the DTI URB for a
  2519. * new transfer result read.
  2520. *
  2521. *
  2522. * The xfer_result DTI URB state machine
  2523. *
  2524. * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
  2525. *
  2526. * We start in OFF mode, the first xfer_result notification [through
  2527. * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
  2528. * read.
  2529. *
  2530. * We receive a buffer -- if it is not a xfer_result, we complain and
  2531. * repost the DTI-URB. If it is a xfer_result then do the xfer seg
  2532. * request accounting. If it is an IN segment, we move to RBI and post
  2533. * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
2534. * repost the DTI-URB and move to RXR state. If there was no IN
  2535. * segment, it will repost the DTI-URB.
  2536. *
  2537. * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
  2538. * errors) in the URBs.
  2539. */
  2540. static void wa_dti_cb(struct urb *urb)
  2541. {
  2542. int result, dti_busy = 0;
  2543. struct wahc *wa = urb->context;
  2544. struct device *dev = &wa->usb_iface->dev;
  2545. u32 xfer_id;
  2546. u8 usb_status;
  2547. BUG_ON(wa->dti_urb != urb);
  2548. switch (wa->dti_urb->status) {
  2549. case 0:
  2550. if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
  2551. struct wa_xfer_result *xfer_result;
  2552. struct wa_xfer *xfer;
  2553. /* We have a xfer result buffer; check it */
  2554. dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
  2555. urb->actual_length, urb->transfer_buffer);
  2556. if (urb->actual_length != sizeof(*xfer_result)) {
  2557. dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
  2558. urb->actual_length,
  2559. sizeof(*xfer_result));
  2560. break;
  2561. }
  2562. xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
  2563. if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
  2564. dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
  2565. xfer_result->hdr.bLength);
  2566. break;
  2567. }
  2568. if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
  2569. dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
  2570. xfer_result->hdr.bNotifyType);
  2571. break;
  2572. }
  2573. xfer_id = le32_to_cpu(xfer_result->dwTransferID);
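/* The low 6 bits carry the WA status code; the high bits are error/warning flags. */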
  2574. usb_status = xfer_result->bTransferStatus & 0x3f;
  2575. if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
  2576. /* taken care of already */
  2577. dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
  2578. __func__, xfer_id,
  2579. xfer_result->bTransferSegment & 0x7f);
  2580. break;
  2581. }
  2582. xfer = wa_xfer_get_by_id(wa, xfer_id);
  2583. if (xfer == NULL) {
  2584. /* FIXME: transaction not found. */
  2585. dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
  2586. xfer_id, usb_status);
  2587. break;
  2588. }
  2589. wa_xfer_result_chew(wa, xfer, xfer_result);
  2590. wa_xfer_put(xfer);
  2591. } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
  2592. dti_busy = wa_process_iso_packet_status(wa, urb);
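/* A busy DTI means isoc buf-in reads are still outstanding; wa_buf_in_cb
 * will resubmit the DTI URB instead of the code below. */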
  2593. } else {
  2594. dev_err(dev, "DTI Error: unexpected EP state = %d\n",
  2595. wa->dti_state);
  2596. }
  2597. break;
  2598. case -ENOENT: /* (we killed the URB)...so, no broadcast */
  2599. case -ESHUTDOWN: /* going away! */
  2600. dev_dbg(dev, "DTI: going down! %d\n", urb->status);
  2601. goto out;
  2602. default:
  2603. /* Unknown error */
  2604. if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
  2605. EDC_ERROR_TIMEFRAME)) {
  2606. dev_err(dev, "DTI: URB max acceptable errors "
  2607. "exceeded, resetting device\n");
  2608. wa_reset_all(wa);
  2609. goto out;
  2610. }
  2611. if (printk_ratelimit())
  2612. dev_err(dev, "DTI: URB error %d\n", urb->status);
  2613. break;
  2614. }
  2615. /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
  2616. if (!dti_busy) {
  2617. result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
  2618. if (result < 0) {
  2619. dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
  2620. result);
  2621. wa_reset_all(wa);
  2622. }
  2623. }
  2624. out:
  2625. return;
  2626. }
  2627. /*
  2628. * Initialize the DTI URB for reading transfer result notifications and also
  2629. * the buffer-in URB, for reading buffers. Then we just submit the DTI URB.
  2630. */
  2631. int wa_dti_start(struct wahc *wa)
  2632. {
  2633. const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
  2634. struct device *dev = &wa->usb_iface->dev;
  2635. int result = -ENOMEM, index;
  2636. if (wa->dti_urb != NULL) /* DTI URB already started */
  2637. goto out;
  2638. wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
  2639. if (wa->dti_urb == NULL)
  2640. goto error_dti_urb_alloc;
  2641. usb_fill_bulk_urb(
  2642. wa->dti_urb, wa->usb_dev,
  2643. usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
  2644. wa->dti_buf, wa->dti_buf_size,
  2645. wa_dti_cb, wa);
  2646. /* init the buf in URBs */
  2647. for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
  2648. usb_fill_bulk_urb(
  2649. &(wa->buf_in_urbs[index]), wa->usb_dev,
  2650. usb_rcvbulkpipe(wa->usb_dev,
  2651. 0x80 | dti_epd->bEndpointAddress),
  2652. NULL, 0, wa_buf_in_cb, wa);
  2653. }
  2654. result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
  2655. if (result < 0) {
  2656. dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
  2657. result);
  2658. goto error_dti_urb_submit;
  2659. }
  2660. out:
  2661. return 0;
  2662. error_dti_urb_submit:
  2663. usb_put_urb(wa->dti_urb);
  2664. wa->dti_urb = NULL;
  2665. error_dti_urb_alloc:
  2666. return result;
  2667. }
  2668. EXPORT_SYMBOL_GPL(wa_dti_start);
  2669. /*
  2670. * Transfer complete notification
  2671. *
  2672. * Called from the notif.c code. We get a notification on EP2 saying
  2673. * that some endpoint has some transfer result data available. We are
  2674. * about to read it.
  2675. *
2676. * To speed up things, we always have a URB reading the DTI endpoint; we
  2677. * don't really set it up and start it until the first xfer complete
  2678. * notification arrives, which is what we do here.
  2679. *
  2680. * Follow up in wa_dti_cb(), as that's where the whole state
  2681. * machine starts.
  2682. *
  2683. * @wa shall be referenced
  2684. */
  2685. void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
  2686. {
  2687. struct device *dev = &wa->usb_iface->dev;
  2688. struct wa_notif_xfer *notif_xfer;
  2689. const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
  2690. notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
  2691. BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
  2692. if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
  2693. /* FIXME: hardcoded limitation, adapt */
  2694. dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
  2695. notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
  2696. goto error;
  2697. }
  2698. /* attempt to start the DTI ep processing. */
  2699. if (wa_dti_start(wa) < 0)
  2700. goto error;
  2701. return;
  2702. error:
  2703. wa_reset_all(wa);
  2704. }