
// SPDX-License-Identifier: GPL-2.0+
/*
 * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 *
 * Based on drivers under drivers/usb/
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>

#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"

static const char * const ep0_state_string[] = {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING"
};

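/*
 * A sketch of the ep0 state machine behind the strings above, as derived
 * from the handlers in this file: a 3-stage control transfer walks
 * WAIT_FOR_SETUP -> WAIT_FOR_DATA_START -> WAIT_FOR_DATA_XMIT ->
 * WAIT_FOR_STATUS_START -> WAIT_FOR_STATUS_XMIT -> WAIT_FOR_SETUP, while
 * a 2-stage (no data) transfer skips the two DATA states. STATUS_PENDING
 * is entered when hardware reports the status stage start before the
 * data stage transmit event has been processed.
 */
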
/* Free the bdl during ep disable */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
				__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * check if the bd_table struct is allocated;
		 * if yes, then check if bd memory has been allocated, then
		 * free the dma_pool and also the bd_table struct memory
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}
		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}
		dev_dbg(bdc->dev,
			"Free dma pool start_bd:%p dma:%llx\n",
			bd_table->start_bd,
			(unsigned long long)bd_table->dma);
		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array */
	kfree(ep->bd_list.bd_table_array);
}

/*
 * Chain the tables by inserting a chain bd at the end of prev_table,
 * pointing to next_table
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	/* Chain the prev table to next table */
	prev_table->start_bd[bd_p_tab-1].offset[0] =
				cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[1] =
				cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[2] =
				0x0;

	prev_table->start_bd[bd_p_tab-1].offset[3] =
				cpu_to_le32(MARK_CHAIN_BD);
}

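/*
 * Sketch of the resulting layout: the last bd of every table is a chain
 * bd, so the tables form a ring (ep_bd_list_alloc() below closes it by
 * chaining the final table back to the first):
 *
 *	table0: [bd0 .. bdN-2][chain -> table1]
 *	table1: [bd0 .. bdN-2][chain -> table2]
 *	...
 *	tableM: [bd0 .. bdN-2][chain -> table0]
 */
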
/* Allocate the bdl for ep, during config ep */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	dma_addr_t dma;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kcalloc(num_tabs,
					sizeof(struct bd_table *),
					GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_zalloc(bdc->bd_table_pool,
							GFP_ATOMIC,
							&dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}

		bd_table->dma = dma;
		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}

/* returns how many bd's are needed for this transfer */
static inline int bd_needed_req(struct bdc_req *req)
{
	int bd_needed = 0;
	int remaining;

	/* 1 bd needed for 0 byte transfer */
	if (req->usb_req.length == 0)
		return 1;

	/* remaining bytes after transferring all max BD size BD's */
	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
	if (remaining)
		bd_needed++;

	/* How many maximum BUFF size BD's ? */
	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
	bd_needed += remaining;

	return bd_needed;
}

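/*
 * Worked example for bd_needed_req(), assuming BD_MAX_BUFF_SIZE is 64 KiB
 * (the actual value comes from bdc.h): a 150000-byte request needs
 * 150000 / 65536 = 2 full-size bds plus one bd for the 18928-byte
 * remainder, i.e. 3 bds in total; a 0-byte request still consumes 1 bd.
 */
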
/* returns the bd index(bdi) corresponding to bd dma address */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find in which table this bd_dma_addr belongs: go through the table
	 * array and compare addresses of first and last address of bd of each
	 * table
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
				(unsigned long long)dma_first_bd,
				(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}

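/*
 * Worked example for bd_add_to_bdi(), assuming 32 bds per table and a
 * 16-byte struct bdc_bd (four __le32 dwords): an address 80 bytes past
 * the dma base of table 2 gives a local bdi of 80 / 16 = 5, so the
 * returned global bdi is 5 + (2 * 32) = 69.
 */
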
/* returns the table index(tbi) of the given bdi */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int tbi;

	tbi = bdi / ep->bd_list.num_bds_table;
	dev_vdbg(ep->bdc->dev,
		"bdi:%d num_bds_table:%d tbi:%d\n",
		bdi, ep->bd_list.num_bds_table, tbi);

	return tbi;
}

/* Find the bdi of the last bd in the transfer */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}

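/*
 * Example, assuming 32 bds per table: if next_hwd_bdi is 32 (the first
 * bd of table 1), the candidate end_bdi is 31, which is the chain bd at
 * the end of table 0 (31 % 31 == 0), so it is stepped back to 30, the
 * last transfer bd. If next_hwd_bdi is 0, the transfer ended at the top
 * of the ring and max_bdi - 1 is used.
 */
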
/*
 * How many transfer bd's are available on this ep bdl; chain bds are not
 * counted in available bds
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where eqp and dqp pointers are, calculate number
	 * of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there is any chain bd in between */
			if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
					== bdi_to_tbi(ep, bd_list->eqp_bdi))) {
				available_bd = available1 - 1;
			}
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if ring is full or empty so
	 * reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}

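/*
 * Worked example, assuming 2 tables of 32 bds each (max_bdi = 63): when
 * the ring is empty (eqp_bdi == hwd_bdi) the function returns
 * 63 - 2 = 61. With eqp_bdi = 40 and hwd_bdi = 10 the ring wraps, so
 * available1 = 63 - 40 = 23 and available2 = 10; no chain bd falls in
 * either span (23/32 == 10/32 == 0), giving 23 + 10 - 1 = 32 usable bds
 * after reserving one slot to distinguish full from empty.
 */
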
/* Notify the hardware after queueing the bd to bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have any way to check if ep state is running,
	 * except the software flags.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}

/* returns the bd corresponding to bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi = 0;

	local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
	dev_vdbg(ep->bdc->dev,
		"%s bdi:%d local_bdi:%d\n",
		__func__, bdi, local_bdi);

	return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
}

/* Advance the enqueue pointer */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;
	/* if it's chain bd, then move to next */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}

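/*
 * Example, assuming 32 bds per table: advancing from eqp_bdi 30 first
 * yields 31, and since (31 + 1) % 32 == 0 that slot is the chain bd at
 * the end of the table, so eqp_bdi is bumped again to 32, the first bd
 * of the next table. Advancing past max_bdi wraps eqp_bdi back to 0.
 */
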
/* Setup the first bd for ep0 transfer */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req->ep->dir = 0;
	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}

/* Setup the bd dma descriptor for a given request */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	ep = req->ep;
	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc);
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs/maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
			dword2 |= BD_LTF;
			/* format of first bd for ep0 is different from the others */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (!req->ep->dir)
			dword3 |= BD_ISP;

		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}

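/*
 * Shape of each transfer bd as written above (four little-endian dwords):
 * offset[0]/offset[1] hold the low/high halves of the buffer dma address,
 * offset[2] carries the buffer length plus the interrupt target, and
 * offset[3] carries the control flags (BD_SOT/BD_EOT, type, direction,
 * BD_ISP, BD_IOC, BD_TFS, BD_SBF). Note that the first bd keeps BD_SBF
 * (stop bd fetch) set until the whole chain is written, so the hardware
 * never sees a partially built transfer.
 */
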
/* Queue the xfr */
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	int num_bds, bd_available;
	struct bdc_ep *ep;
	int ret;

	ep = req->ep;
	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	num_bds = bd_needed_req(req);
	bd_available = bd_available_ep(ep);

	/* how many bd's are available on ep */
	if (num_bds > bd_available)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, num_bds);
	if (ret)
		return ret;

	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}

/* callback to gadget layer when xfr completes */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}

/* Disable the endpoint */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	ret = 0;
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop, it can fail in
	 * disconnect scenarios, continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free");

	ep->flags = 0;
	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;
	ep->ep_type = 0;

	return ret;
}

/* Enable the ep */
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc;
	int ret = 0;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
				__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return -ENOMEM;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	if (ep->ep_num == 1)
		return ret;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}

/* EP0 related code */

/* Queue a status stage BD */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;
	bdc_queue_xfr(bdc, status_req);

	return 0;
}

/* Queue xfr on ep0 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* check if the status stage was delayed */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
			return 0;
		}
	} else {
		/*
		 * if delayed status is false and a 0 length transfer is
		 * requested, i.e. for the status stage of some setup request,
		 * then just return from here; the status stage is queued
		 * independently
		 */
		if (req->usb_req.length == 0)
			return 0;
	}
	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}

/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep, &bdc->ep0_req);
}

/* Queue req on ep */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret = 0;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;

	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}

/* Dequeue a request from ep */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64 deq_ptr_64 = 0;
	struct bdc *bdc;
	u32 tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;

	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
				__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
				ep, (void *)ep->usb_ep.desc);
	/* if still connected, stop the ep to see where the HW is */
	if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
		ret = bdc_stop_ep(bdc, ep->ep_num);
		/* if there is an issue, then no need to go further */
		if (ret)
			return 0;
	} else
		return 0;

	/*
	 * After the endpoint is stopped, there can be 3 cases: the request
	 * is processed, pending or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0);
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1);
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to the actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
			end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If the bds from start till end are all processed, we cannot deq req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;

	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma = table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to HW limitation we need to bypass chain bd's and issue ep_bla;
	 * in case start is pending and this is the first request in the list,
	 * then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_bd_dma
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}

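/*
 * Example of the pending-range check above, assuming max_bdi = 63: if the
 * hardware dequeue pointer (curr_hw_dqpi) is 50 and eqp_bdi is 10, the
 * HW-owned range wraps (50..63 and 0..10). A request with start_bdi = 55
 * then has both start and end pending; one with start_bdi = 20 and
 * end_bdi = 52 is partially processed, so only end_pending is set.
 */
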
/* Halt/Clear the ep based on value */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;

		dev_dbg(bdc->dev, "After Clear\n");
	}

	return ret;
}

/* Free all the endpoints */
void bdc_free_ep(struct bdc *bdc)
{
	struct bdc_ep *ep;
	u8 epnum;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (epnum = 1; epnum < bdc->num_eps; epnum++) {
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			continue;

		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);

		/* ep0 is not in this gadget list */
		if (epnum != 1)
			list_del(&ep->usb_ep.ep_list);

		kfree(ep);
	}
}

/* USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->test_mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		usb2_pm |= bdc->test_mode << 28;
		break;
	default:
		return -EINVAL;
	}
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}

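/*
 * Example: with test_mode == TEST_PACKET, the selector value is shifted
 * into bits 31:28 of BDC_USPPM2 (the PTC field cleared via BDC_PTC_MASK
 * above), while the rest of the register contents are preserved.
 */
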
/*
 * Helper function to handle Transfer status report with status as either
 * success or short
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
						struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
	bdc_dbg_srr(bdc, 0);
	/* do not process this sr if ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);

	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * If sr_status is short and this transfer has more than 1 bd then it
	 * needs special handling; this is only applicable for bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is a multi bd xfr, lets see which bd
		 * caused the short transfer and how many bytes have been
		 * transferred so far.
		 */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi = bd_xfr->start_bdi;
		/*
		 * We know the start_bdi and short_bdi, how many xfr
		 * bds in between
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (!(bdi_to_tbi(ep, start_bdi) ==
						bdi_to_tbi(ep, short_bdi)))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		if (!(end_bdi == short_bdi))
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		/* length queued */
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}

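/*
 * Worked example for the short-transfer path above, assuming 64 KiB bds
 * and a transfer starting at bdi 0 that stops short at bdi 1: max_len_bds
 * is 1, so actual_length starts at 65536. If the short bd had 65536 bytes
 * queued and the status report carries 1000 bytes residual, length_short
 * is 65536 - 1000 = 64536 and usb_req.actual becomes 65536 + 64536 =
 * 130072.
 */
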
/* EP0 setup related packet handlers */

/*
 * Setup packet received, just store the packet and process on next DS or SS
 * started SR
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	/* Store received setup packet */
	setup_pkt = &bdc->setup_pkt;
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
	len = le16_to_cpu(setup_pkt->wLength);
	if (!len)
		bdc->ep0_state = WAIT_FOR_STATUS_START;
	else
		bdc->ep0_state = WAIT_FOR_DATA_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}

/* Stall ep0 */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	struct bdc_req *req;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}

/* SET_ADD handlers */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	int ret = 0;
	u32 addr;

	addr = le16_to_cpu(ctrl->wValue);
	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	if (addr > 127)
		return -EINVAL;

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue Address device command */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		if (addr)
			usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);

		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			"SET Address in wrong device state %d\n",
			state);
		ret = -EINVAL;
	}

	return ret;
}

/* Handler for SET/CLEAR FEATURE requests for device */
static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
					u16 wIndex, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 usppms = 0;

	dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
				__func__, set, state);
	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
		if (set)
			bdc->devstatus |= REMOTE_WAKE_ENABLE;
		else
			bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_DEVICE_TEST_MODE:
		dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
		if ((wIndex & 0xFF) ||
				(bdc->gadget.speed != USB_SPEED_HIGH) || !set)
			return -EINVAL;

		bdc->test_mode = wIndex >> 8;
		break;

	case USB_DEVICE_U1_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
					state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			/* clear previous u1t */
			usppms &= ~BDC_U1T(BDC_U1T_MASK);
			usppms |= BDC_U1T(U1_TIMEOUT);
			usppms |= BDC_U1E | BDC_PORT_W1S;
			bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
		} else {
			usppms &= ~BDC_U1E;
			usppms |= BDC_PORT_W1S;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_U2_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
					state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			usppms |= BDC_U2E;
			usppms |= BDC_U2A;
			bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
		} else {
			usppms &= ~BDC_U2E;
			usppms &= ~BDC_U2A;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_LTM_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
					state != USB_STATE_CONFIGURED)
			return -EINVAL;
		break;
	default:
		dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
		return -EOPNOTSUPP;
	} /* USB_RECIP_DEVICE end */

	return 0;
}

/* SET/CLEAR FEATURE handler */
static int ep0_handle_feature(struct bdc *bdc,
			      struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wValue;
	u16 wIndex;
	int epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);

	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);

	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;
		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when SETUP packet
		 * was received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}

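/*
 * The epnum mapping used above (and again in ep0_handle_status() below):
 * the bdc_ep_array index is 2 * usb_ep_number for OUT endpoints and
 * 2 * usb_ep_number + 1 for IN endpoints, with index 1 reserved for ep0.
 * For example, CLEAR_FEATURE(ENDPOINT_HALT) on ep2-IN (wIndex 0x82)
 * resolves to bdc_ep_array[5].
 */
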
/* GET_STATUS request handler */
static int ep0_handle_status(struct bdc *bdc,
			     struct usb_ctrlrequest *setup_pkt)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 usb_status = 0;
	u32 epnum;
	u16 wIndex;

	/* USB2.0 spec sec 9.4.5 */
	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wIndex = le16_to_cpu(setup_pkt->wIndex);
	dev_dbg(bdc->dev, "%s\n", __func__);
	usb_status = bdc->devstatus;
	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		dev_dbg(bdc->dev,
			"USB_RECIP_DEVICE devstatus:%08x\n",
			bdc->devstatus);
		/* USB3 spec, sec 9.4.5 */
		if (bdc->gadget.speed == USB_SPEED_SUPER)
			usb_status &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		if (bdc->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * This should come from func for Func remote wkup
			 * usb_status |= 1;
			 */
			if (bdc->devstatus & REMOTE_WAKE_ENABLE)
				usb_status |= REMOTE_WAKE_ENABLE;
		} else {
			usb_status = 0;
		}

		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum*2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}
		ep = bdc->bdc_ep_array[epnum];
		if (!ep) {
			dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?");
			return -EINVAL;
		}
		if (ep->flags & BDC_EP_STALL)
			usb_status |= 1 << USB_ENDPOINT_HALT;

		break;
	default:
		dev_err(bdc->dev, "Unknown recipient for get_status\n");
		return -EINVAL;
	}
	/* prepare a data stage for GET_STATUS */
	dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
	*(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
	bdc->ep0_req.usb_req.length = 2;
	bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
	ep0_queue_data_stage(bdc);

	return 0;
}

static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
	/* ep0_set_sel_cmpl */
}

/* Queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
			struct usb_ctrlrequest *setup_pkt)
{
	struct bdc_ep *ep;
	u16 wLength;

	dev_dbg(bdc->dev, "%s\n", __func__);
	wLength = le16_to_cpu(setup_pkt->wLength);
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}

/*
 * Queue a 0-byte bd: only if wLength is more than the length and the
 * length is a multiple of MaxPacket is a 0-byte BD queued
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;
	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (ret) {
		dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
		return ret;
	}
	bdc->ep0_state = WAIT_FOR_DATA_XMIT;

	return 0;
}

/* Control request handler */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
							USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
				"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		delegate_setup = 1;
	}

	if (delegate_setup) {
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}

/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we receive ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}

/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if ZLP was queued */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if data stage is in progress */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * Only process the setup here for a 2-stage setup; for a 3-stage
	 * setup the data stage is already handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}

/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		bdc->ep0_state = WAIT_FOR_SETUP;
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret < 0) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}


/* xfr completion status report handler */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
								__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
					__func__, sr_status, ep->name);
	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint\n");
			return;
		}
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
							bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength > data
			 * length and the length was a multiple of MaxPacket,
			 * then queue a ZLP.
			 */
			if (bdc->zlp_needed) {
				/* queue a 0-length BD */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}
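
/*
 * Sketch (illustrative only, assuming XSF_STS() extracts the completion
 * code as used above): how the fields consumed by bdc_sr_xsf() unpack
 * from status-report word 3. The endpoint index lives in bits 8:4.
 */
static inline void bdc_sr_xsf_decode_example(struct bdc_sr *sreport)
{
	u32 w = le32_to_cpu(sreport->offset[3]);
	u8 ep_num = (w >> 4) & 0x1f;	/* index into bdc->bdc_ep_array */
	u32 sts = XSF_STS(w);		/* XSF_SUCC, XSF_SHORT, ... */

	pr_debug("ep_num=%u sts=%u\n", ep_num, sts);
}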

static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete || !_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	if (!ep->usb_ep.desc) {
		dev_warn(bdc->dev,
			 "trying to queue req %p to disabled %s\n",
			 _req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_warn(bdc->dev,
			 "req length > supported MAX:%d requested:%d\n",
			 MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				  struct usb_request *_req)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->usb_req == _req)
			break;
	}
	if (&req->usb_req != _req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req != req\n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);
err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = ep_set_halt(ep, value);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
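
/*
 * Usage note (informational): function drivers reach this through
 * usb_ep_set_halt()/usb_ep_clear_halt(), which dispatch via
 * bdc_gadget_ep_ops below. Per the checks above, isochronous endpoints
 * cannot be halted (-EINVAL) and a halt is refused while transfers are
 * still queued (-EAGAIN), in line with the usb_ep_set_halt() contract.
 */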

static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						     gfp_t gfp_flags)
{
	struct bdc_req *req;
	struct bdc_ep *ep;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	ep = to_bdc_ep(_ep);
	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}
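
/*
 * Usage sketch (illustrative): a function driver allocates and queues a
 * request through the standard gadget API, which dispatches here via
 * bdc_gadget_ep_ops. Buffer and callback names are hypothetical.
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = my_buf;
 *	req->length = my_len;
 *	req->complete = my_complete_fn;
 *	ret = usb_ep_queue(ep, req, GFP_KERNEL);
 */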

static void bdc_gadget_free_request(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct bdc_req *req;

	req = to_bdc_req(_req);
	kfree(req);
}

/* endpoint operations */

/* configure endpoint and also allocate resources */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("bdc_gadget_ep_enable invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver
	    || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
		return -ESHUTDOWN;
	}

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}
	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}
	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	if (!(ep->flags & BDC_EP_ENABLED)) {
		if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_warn(bdc->dev, "%s is already disabled\n",
				 ep->name);
		return 0;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};

/* dir = 1 is IN */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	if (dir)
		ep->usb_ep.caps.dir_in = true;
	else
		ep->usb_ep.caps.dir_out = true;

	/* ep->ep_num is the index inside bdc->bdc_ep_array */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->usb_ep.caps.type_control = true;
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.caps.type_iso = true;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		ep->usb_ep.max_streams = 0;
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->flags = 0;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->ep_num=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}
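
/*
 * Sketch (illustrative): the bdc_ep_array index computed in init_ep()
 * above, pulled out as a standalone helper. For example,
 * init_ep(bdc, 2, 0) creates "ep1out" at index 2 and
 * init_ep(bdc, 2, 1) creates "ep1in" at index 3.
 */
static inline u32 bdc_epnum_to_index_example(u32 epnum, u32 dir_in)
{
	if (epnum == 1)
		return 1;	/* ep0 lives at index 1 */

	return dir_in ? epnum * 2 - 1 : epnum * 2 - 2;
}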

/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
	u8 epnum;
	int ret;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	INIT_LIST_HEAD(&bdc->gadget.ep_list);
	/* init ep0 */
	ret = init_ep(bdc, 1, 0);
	if (ret) {
		dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
		return ret;
	}

	for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
		/* OUT */
		ret = init_ep(bdc, epnum, 0);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}

		/* IN */
		ret = init_ep(bdc, epnum, 1);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}
	}

	return 0;
}
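
/*
 * Resulting bdc_ep_array layout (informational), built by the loop
 * above for epnum = 2 .. num_eps / 2:
 *
 *	index 1:	ep0 (bidirectional control)
 *	index 2, 3:	ep1out, ep1in
 *	index 4, 5:	ep2out, ep2in
 *	...
 *
 * i.e. OUT endpoints land on even indices and IN endpoints on odd
 * indices, as computed in init_ep().
 */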