/* drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c
  4. * Designware DWC2 on-chip full/high speed USB OTG 2.0 device controllers
  5. *
  6. * Copyright (C) 2009 for Samsung Electronics
  7. *
  8. * BSP Support for Samsung's UDC driver
  9. * available at:
  10. * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git
  11. *
  12. * State machine bugfixes:
  13. * Marek Szyprowski <m.szyprowski@samsung.com>
  14. *
  15. * Ported to u-boot:
  16. * Marek Szyprowski <m.szyprowski@samsung.com>
  17. * Lukasz Majewski <l.majewski@samsumg.com>
  18. */
/*
 * Bookkeeping for a ClearFeature(HALT) handled in DMA mode: the IN-EP
 * interrupt path restarts the transfer queued on endpoint
 * clear_feature_num once clear_feature_flag is set
 * (see process_ep_in_intr()).
 */
static u8 clear_feature_num;
int clear_feature_flag;

/* Mass-storage class-specific bRequest codes (Bulk-Only Transport) */
#define GET_MAX_LUN_REQUEST 0xFE
#define BOT_RESET_REQUEST 0xFF
/*
 * Queue a zero-length IN packet on EP0 (status stage of a control
 * transfer) and move the EP0 state machine to WAIT_FOR_IN_COMPLETE
 * so complete_tx() finishes the request when the ZLP goes out.
 */
static inline void dwc2_udc_ep0_zlp(struct dwc2_udc *dev)
{
	u32 ep_ctrl;

	/* DMA address must still be programmed even though no data moves */
	writel(usb_ctrl_dma_addr, &reg->in_endp[EP0_CON].diepdma);
	/* one packet with zero transfer size == zero-length packet */
	writel(DIEPT_SIZ_PKT_CNT(1), &reg->in_endp[EP0_CON].dieptsiz);

	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
	/* arm the endpoint and clear NAK so the core transmits the ZLP */
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->in_endp[EP0_CON].diepctl);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	dev->ep0state = WAIT_FOR_IN_COMPLETE;
}
/*
 * Re-arm EP0 OUT to receive the next 8-byte SETUP packet into the
 * shared usb_ctrl DMA buffer.  Called whenever the driver returns to
 * the WAIT_FOR_SETUP state.  Note: EPENA is set without CNAK here;
 * the core accepts SETUP packets regardless of the NAK status.
 */
static void dwc2_udc_pre_setup(void)
{
	u32 ep_ctrl;

	debug_cond(DEBUG_IN_EP,
		   "%s : Prepare Setup packets.\n", __func__);

	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	writel(ep_ctrl|DEPCTL_EPENA, &reg->out_endp[EP0_CON].doepctl);

	/* NOTE(review): "ZLP" in these messages is copy-paste from
	 * dwc2_udc_ep0_zlp(); this function arms a SETUP receive. */
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->out_endp[EP0_CON].doepctl));
}
/*
 * Arm EP0 OUT for the status (complete-out) stage of a control-IN
 * transfer.  The received data (if any) lands in the shared usb_ctrl
 * DMA buffer; unlike dwc2_udc_pre_setup() this also clears NAK so the
 * host's OUT packet is accepted immediately.
 */
static inline void dwc2_ep0_complete_out(void)
{
	u32 ep_ctrl;

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->out_endp[EP0_CON].doepctl));
	debug_cond(DEBUG_IN_EP,
		   "%s : Prepare Complete Out packet.\n", __func__);

	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	/* enable the endpoint and clear NAK to accept the status OUT */
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->out_endp[EP0_CON].doepctl);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->out_endp[EP0_CON].doepctl));
}
/*
 * Program an OUT endpoint for one DMA receive chunk of @req.
 *
 * The request's buffer is used directly as the DMA target; the chunk
 * is capped at DMA_BUFFER_SIZE for regular endpoints and at one
 * max-packet for EP0.  The caller is re-invoked from complete_rx()
 * until the request is finished.  Always returns 0.
 */
static int setdma_rx(struct dwc2_ep *ep, struct dwc2_request *req)
{
	u32 *buf, ctrl;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	buf = req->req.buf + req->req.actual;
	length = min_t(u32, req->req.length - req->req.actual,
		       ep_num ? DMA_BUFFER_SIZE : ep->ep.maxpacket);

	ep->len = length;
	ep->dma_buf = buf;

	/* EP0 and zero-length transfers still occupy one packet slot */
	if (ep_num == EP0_CON || length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	ctrl = readl(&reg->out_endp[ep_num].doepctl);

	/* drop stale cache lines so CPU sees what the DMA writes */
	invalidate_dcache_range((unsigned long) ep->dma_buf,
				(unsigned long) ep->dma_buf +
				ROUND(ep->len, CONFIG_SYS_CACHELINE_SIZE));

	writel((unsigned long) ep->dma_buf, &reg->out_endp[ep_num].doepdma);
	writel(DOEPT_SIZ_PKT_CNT(pktcnt) | DOEPT_SIZ_XFER_SIZE(length),
	       &reg->out_endp[ep_num].doeptsiz);
	/* enable the endpoint and clear NAK to start receiving */
	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->out_endp[ep_num].doepctl);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
		   "DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
		   "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		   __func__, ep_num,
		   readl(&reg->out_endp[ep_num].doepdma),
		   readl(&reg->out_endp[ep_num].doeptsiz),
		   readl(&reg->out_endp[ep_num].doepctl),
		   buf, pktcnt, length);
	return 0;
}
/*
 * Program an IN endpoint for one DMA transmit of @req.
 *
 * Flushes the data cache over the source buffer, flushes the
 * endpoint's TX FIFO, then arms the endpoint.  For EP0 the length is
 * clamped to one max-packet; other endpoints send the whole remainder
 * in a single armed transfer.  Returns the number of bytes queued.
 */
static int setdma_tx(struct dwc2_ep *ep, struct dwc2_request *req)
{
	u32 *buf, ctrl = 0;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	buf = req->req.buf + req->req.actual;
	length = req->req.length - req->req.actual;

	if (ep_num == EP0_CON)
		length = min(length, (u32)ep_maxpacket(ep));

	ep->len = length;
	ep->dma_buf = buf;

	/* make the buffer contents visible to the DMA engine */
	flush_dcache_range((unsigned long) ep->dma_buf,
			   (unsigned long) ep->dma_buf +
			   ROUND(ep->len, CONFIG_SYS_CACHELINE_SIZE));

	/* a zero-length transfer still sends one (empty) packet */
	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	/* Flush the endpoint's Tx FIFO */
	writel(TX_FIFO_NUMBER(ep->fifo_num), &reg->grstctl);
	writel(TX_FIFO_NUMBER(ep->fifo_num) | TX_FIFO_FLUSH, &reg->grstctl);
	/* busy-wait until the core clears the flush bit */
	while (readl(&reg->grstctl) & TX_FIFO_FLUSH)
		;

	writel((unsigned long) ep->dma_buf, &reg->in_endp[ep_num].diepdma);
	writel(DIEPT_SIZ_PKT_CNT(pktcnt) | DIEPT_SIZ_XFER_SIZE(length),
	       &reg->in_endp[ep_num].dieptsiz);

	ctrl = readl(&reg->in_endp[ep_num].diepctl);

	/* Write the FIFO number to be used for this endpoint */
	/*
	 * NOTE(review): `ctrl &= MASK` keeps ONLY the masked bits; to
	 * clear a field before OR-ing in a new value the usual idiom is
	 * `ctrl &= ~MASK`.  Verify DIEPCTL_TX_FIFO_NUM_MASK's polarity
	 * against the register definitions before changing anything.
	 */
	ctrl &= DIEPCTL_TX_FIFO_NUM_MASK;
	ctrl |= DIEPCTL_TX_FIFO_NUM(ep->fifo_num);

	/* Clear reserved (Next EP) bits */
	ctrl = (ctrl&~(EP_MASK<<DEPCTL_NEXT_EP_BIT));

	/* arm the endpoint and clear NAK to start transmitting */
	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->in_endp[ep_num].diepctl);

	debug_cond(DEBUG_IN_EP,
		   "%s:EP%d TX DMA start : DIEPDMA0 = 0x%x,"
		   "DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
		   "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		   __func__, ep_num,
		   readl(&reg->in_endp[ep_num].diepdma),
		   readl(&reg->in_endp[ep_num].dieptsiz),
		   readl(&reg->in_endp[ep_num].diepctl),
		   buf, pktcnt, length);
	return length;
}
/*
 * Handle a TRANSFER_DONE interrupt on an OUT endpoint: account the
 * bytes the DMA received, complete the request if it is finished
 * (or a short packet ended it), and kick the next queued transfer.
 * For EP0 in DATA_STATE_RECV, a finished OUT data stage is answered
 * with a zero-length IN status packet instead of done().
 */
static void complete_rx(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;

	if (list_empty(&ep->queue)) {
		/* interrupt with nothing queued: nothing to account */
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct dwc2_request, queue);
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);

	/* DOEPTSIZ holds the REMAINING size; field width differs on EP0 */
	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	/* bytes actually received = programmed length - remainder */
	xfer_size = ep->len - xfer_size;

	/*
	 * NOTE:
	 *
	 * Please be careful with proper buffer allocation for USB request,
	 * which needs to be aligned to CONFIG_SYS_CACHELINE_SIZE, not only
	 * with starting address, but also its size shall be a cache line
	 * multiplication.
	 *
	 * This will prevent from corruption of data allocated immediately
	 * before or after the buffer.
	 *
	 * For armv7, the cache_v7.c provides proper code to emit "ERROR"
	 * message to warn users.
	 */
	invalidate_dcache_range((unsigned long) ep->dma_buf,
				(unsigned long) ep->dma_buf +
				ROUND(xfer_size, CONFIG_SYS_CACHELINE_SIZE));

	/* clamp so a rogue over-length DMA can't overrun req accounting */
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	is_short = !!(xfer_size % ep->ep.maxpacket);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		   "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, req->req.length - req->req.actual);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			debug_cond(DEBUG_OUT_EP != 0, " => Send ZLP\n");
			dwc2_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						 struct dwc2_request, queue);
				debug_cond(DEBUG_OUT_EP != 0,
					   "%s: Next Rx request start...\n",
					   __func__);
				setdma_rx(ep, req);
			}
		}
	} else
		/* more data expected: re-arm for the next chunk */
		setdma_rx(ep, req);
}
/*
 * Handle a TRANSFER_DONE interrupt on an IN endpoint: account the
 * transmitted bytes and either advance the EP0 control state machine
 * (ep_num == 0) or complete the request and start the next one.
 */
static void complete_tx(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 last;

	/* 2-byte GET_STATUS reply finished: move on to the status OUT */
	if (dev->ep0state == WAIT_FOR_NULL_COMPLETE) {
		dev->ep0state = WAIT_FOR_OUT_COMPLETE;
		dwc2_ep0_complete_out();
		return;
	}

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_IN_EP,
			   "%s: TX DMA done : NULL REQ on IN EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct dwc2_request, queue);
	ep_tsr = readl(&reg->in_endp[ep_num].dieptsiz);	/* debug only */

	/* the whole programmed length was sent when this IRQ fires */
	xfer_size = ep->len;
	is_short = (xfer_size < ep->ep.maxpacket);
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);

	debug_cond(DEBUG_IN_EP,
		   "%s: TX DMA done : ep = %d, tx bytes = %d/%d, "
		   "is_short = %d, DIEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, req->req.length - req->req.actual);

	if (ep_num == 0) {
		if (dev->ep0state == DATA_STATE_XMIT) {
			/* keep feeding the IN data stage packet by packet */
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, ep0stat =="
				   "DATA_STATE_XMIT\n",
				   __func__, ep_num);
			last = write_fifo_ep0(ep, req);
			if (last)
				dev->ep0state = WAIT_FOR_COMPLETE;
		} else if (dev->ep0state == WAIT_FOR_IN_COMPLETE) {
			/* status-IN (ZLP) done: control transfer finished */
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_SETUP;
		} else if (dev->ep0state == WAIT_FOR_COMPLETE) {
			/* data stage done: arm the status OUT stage */
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_OUT_COMPLETE;
			dwc2_ep0_complete_out();
		} else {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, invalid ep state\n",
				   __func__, ep_num);
		}
		return;
	}

	/* non-control endpoint: complete and chain the next request */
	if (req->req.actual == req->req.length)
		done(ep, req, 0);

	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct dwc2_request, queue);
		debug_cond(DEBUG_IN_EP,
			   "%s: Next Tx request start...\n", __func__);
		setdma_tx(ep, req);
	}
}
  275. static inline void dwc2_udc_check_tx_queue(struct dwc2_udc *dev, u8 ep_num)
  276. {
  277. struct dwc2_ep *ep = &dev->ep[ep_num];
  278. struct dwc2_request *req;
  279. debug_cond(DEBUG_IN_EP,
  280. "%s: Check queue, ep_num = %d\n", __func__, ep_num);
  281. if (!list_empty(&ep->queue)) {
  282. req = list_entry(ep->queue.next, struct dwc2_request, queue);
  283. debug_cond(DEBUG_IN_EP,
  284. "%s: Next Tx request(0x%p) start...\n",
  285. __func__, req);
  286. if (ep_is_in(ep))
  287. setdma_tx(ep, req);
  288. else
  289. setdma_rx(ep, req);
  290. } else {
  291. debug_cond(DEBUG_IN_EP,
  292. "%s: NULL REQ on IN EP-%d\n", __func__, ep_num);
  293. return;
  294. }
  295. }
/*
 * Service all pending IN-endpoint interrupts.  DAINT's low half has
 * one bit per IN endpoint; the loop shifts the bitmap right each pass
 * and tests the current low bit, so ep_num tracks the endpoint whose
 * DIEPINT is examined.
 */
static void process_ep_in_intr(struct dwc2_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	debug_cond(DEBUG_IN_EP,
		   "*** %s: EP In interrupt : DAINT = 0x%x\n", __func__, ep_intr);

	ep_intr &= DAINT_MASK;

	while (ep_intr) {
		/* test the low bit of the (shifted) IN-EP bitmap */
		if (ep_intr & DAINT_IN_EP_INT(1)) {
			ep_intr_status = readl(&reg->in_endp[ep_num].diepint);
			debug_cond(DEBUG_IN_EP,
				   "\tEP%d-IN : DIEPINT = 0x%x\n",
				   ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->in_endp[ep_num].diepint);

			if (ep_intr_status & TRANSFER_DONE) {
				complete_tx(dev, ep_num);

				if (ep_num == 0) {
					/* IN stage finished: go back to
					 * waiting for the next SETUP */
					if (dev->ep0state ==
					    WAIT_FOR_IN_COMPLETE)
						dev->ep0state = WAIT_FOR_SETUP;

					if (dev->ep0state == WAIT_FOR_SETUP)
						dwc2_udc_pre_setup();

					/* continue transfer after
					   set_clear_halt for DMA mode */
					if (clear_feature_flag == 1) {
						dwc2_udc_check_tx_queue(dev,
							clear_feature_num);
						clear_feature_flag = 0;
					}
				}
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}
/*
 * Service all pending OUT-endpoint interrupts.  DAINT's high half has
 * one bit per OUT endpoint; it is shifted down and walked bit by bit.
 * EP0 additionally distinguishes a completed SETUP phase from a
 * completed data/status OUT transfer.
 */
static void process_ep_out_intr(struct dwc2_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	debug_cond(DEBUG_OUT_EP != 0,
		   "*** %s: EP OUT interrupt : DAINT = 0x%x\n",
		   __func__, ep_intr);

	/* isolate the OUT-endpoint bits */
	ep_intr = (ep_intr >> DAINT_OUT_BIT) & DAINT_MASK;

	while (ep_intr) {
		if (ep_intr & 0x1) {
			ep_intr_status = readl(&reg->out_endp[ep_num].doepint);
			debug_cond(DEBUG_OUT_EP != 0,
				   "\tEP%d-OUT : DOEPINT = 0x%x\n",
				   ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->out_endp[ep_num].doepint);

			if (ep_num == 0) {
				if (ep_intr_status & TRANSFER_DONE) {
					/* in WAIT_FOR_OUT_COMPLETE the
					 * status OUT just finished: rearm
					 * for the next SETUP instead of
					 * accounting data */
					if (dev->ep0state !=
					    WAIT_FOR_OUT_COMPLETE)
						complete_rx(dev, ep_num);
					else {
						dev->ep0state = WAIT_FOR_SETUP;
						dwc2_udc_pre_setup();
					}
				}

				if (ep_intr_status &
				    CTRL_OUT_EP_SETUP_PHASE_DONE) {
					debug_cond(DEBUG_OUT_EP != 0,
						   "SETUP packet arrived\n");
					dwc2_handle_ep0(dev);
				}
			} else {
				if (ep_intr_status & TRANSFER_DONE)
					complete_rx(dev, ep_num);
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}
  376. /*
  377. * usb client interrupt handler.
  378. */
/*
 * usb client interrupt handler.
 *
 * Top-level ISR: reads GINTSTS once and dispatches, in order, on
 * enumeration-done, early-suspend, suspend, resume, reset, and the
 * per-endpoint IN/OUT interrupt summaries.  Runs under dev->lock;
 * the lock is dropped only around the gadget driver's disconnect()
 * callback.  Always returns IRQ_HANDLED.
 */
static int dwc2_udc_irq(int irq, void *_dev)
{
	struct dwc2_udc *dev = _dev;
	u32 intr_status;
	u32 usb_status, gintmsk;
	unsigned long flags = 0;

	spin_lock_irqsave(&dev->lock, flags);

	intr_status = readl(&reg->gintsts);
	gintmsk = readl(&reg->gintmsk);

	debug_cond(DEBUG_ISR,
		   "\n*** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x,"
		   "DAINT : 0x%x, DAINTMSK : 0x%x\n",
		   __func__, intr_status, state_names[dev->ep0state], gintmsk,
		   readl(&reg->daint), readl(&reg->daintmsk));

	/* spurious interrupt: nothing pending */
	if (!intr_status) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_HANDLED;
	}

	if (intr_status & INT_ENUMDONE) {
		debug_cond(DEBUG_ISR, "\tSpeed Detection interrupt\n");

		writel(INT_ENUMDONE, &reg->gintsts);	/* ack */

		/* DSTS[2:1] holds the enumerated speed */
		usb_status = (readl(&reg->dsts) & 0x6);

		if (usb_status & (USB_FULL_30_60MHZ | USB_FULL_48MHZ)) {
			debug_cond(DEBUG_ISR,
				   "\t\tFull Speed Detection\n");
			set_max_pktsize(dev, USB_SPEED_FULL);
		} else {
			debug_cond(DEBUG_ISR,
				   "\t\tHigh Speed Detection : 0x%x\n",
				   usb_status);
			set_max_pktsize(dev, USB_SPEED_HIGH);
		}
	}

	if (intr_status & INT_EARLY_SUSPEND) {
		debug_cond(DEBUG_ISR, "\tEarly suspend interrupt\n");
		writel(INT_EARLY_SUSPEND, &reg->gintsts);	/* ack only */
	}

	if (intr_status & INT_SUSPEND) {
		usb_status = readl(&reg->dsts);
		debug_cond(DEBUG_ISR,
			   "\tSuspend interrupt :(DSTS):0x%x\n", usb_status);
		writel(INT_SUSPEND, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);

			/* HACK to let gadget detect disconnected state */
			if (dev->driver->disconnect) {
				/* callback may sleep/re-enter: drop lock */
				spin_unlock_irqrestore(&dev->lock, flags);
				dev->driver->disconnect(&dev->gadget);
				spin_lock_irqsave(&dev->lock, flags);
			}
		}
	}

	if (intr_status & INT_RESUME) {
		debug_cond(DEBUG_ISR, "\tResume interrupt\n");
		writel(INT_RESUME, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver
		    && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
		}
	}

	if (intr_status & INT_RESET) {
		usb_status = readl(&reg->gotgctl);
		debug_cond(DEBUG_ISR,
			   "\tReset interrupt - (GOTGCTL):0x%x\n", usb_status);
		writel(INT_RESET, &reg->gintsts);

		/* only act when both session bits (GOTGCTL[19:18]) are set */
		if ((usb_status & 0xc0000) == (0x3 << 18)) {
			if (reset_available) {
				debug_cond(DEBUG_ISR,
					   "\t\tOTG core got reset (%d)!!\n",
					   reset_available);
				reconfig_usbd(dev);
				dev->ep0state = WAIT_FOR_SETUP;
				reset_available = 0;
				dwc2_udc_pre_setup();
			} else
				reset_available = 1;
		} else {
			reset_available = 1;
			debug_cond(DEBUG_ISR,
				   "\t\tRESET handling skipped\n");
		}
	}

	if (intr_status & INT_IN_EP)
		process_ep_in_intr(dev);

	if (intr_status & INT_OUT_EP)
		process_ep_out_intr(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}
  471. /** Queue one request
  472. * Kickstart transfer if needed
  473. */
/** Queue one request
 * Kickstart transfer if needed
 *
 * usb_ep_ops.queue implementation.  Validates the request and
 * endpoint, marks the request in progress, and — when the endpoint
 * queue is empty and not stopped — starts the transfer immediately
 * (EP0 via dwc2_ep0_kick(), data EPs via setdma_tx/rx()).  Otherwise
 * the request is appended and the IRQ handlers advance the queue.
 * Returns 0, -EINVAL on bad arguments, or -ESHUTDOWN when the gadget
 * is not bound/enumerated.
 */
static int dwc2_queue(struct usb_ep *_ep, struct usb_request *_req,
		      gfp_t gfp_flags)
{
	struct dwc2_request *req;
	struct dwc2_ep *ep;
	struct dwc2_udc *dev;
	unsigned long flags = 0;
	u32 ep_num, gintsts;

	/* NOTE(review): container_of is applied before the !_req check;
	 * a NULL _req would already be dereferenced via req->queue below.
	 * Callers never pass NULL, but the ordering is fragile. */
	req = container_of(_req, struct dwc2_request, req);
	if (unlikely(!_req || !_req->complete || !_req->buf
		     || !list_empty(&req->queue))) {
		debug("%s: bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct dwc2_ep, ep);
	/* ep0 has no descriptor but is still a valid target */
	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		debug("%s: bad ep: %s, %d, %p\n", __func__,
		      ep->ep.name, !ep->desc, _ep);
		return -EINVAL;
	}

	ep_num = ep_index(ep);
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		debug("%s: bogus device state %p\n", __func__, dev->driver);
		return -ESHUTDOWN;
	}

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	debug("\n*** %s: %s-%s req = %p, len = %d, buf = %p"
	      "Q empty = %d, stopped = %d\n",
	      __func__, _ep->name, ep_is_in(ep) ? "in" : "out",
	      _req, _req->length, _req->buf,
	      list_empty(&ep->queue), ep->stopped);

#ifdef DEBUG
	/* hex-dump up to the first 64 bytes of the request payload */
	{
		int i, len = _req->length;

		printf("pkt = ");
		if (len > 64)
			len = 64;
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)_req->buf)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep_num == 0) {
			/* EP0 */
			list_add_tail(&req->queue, &ep->queue);
			dwc2_ep0_kick(dev, ep);
			req = 0;	/* already queued: don't add again */
		} else if (ep_is_in(ep)) {
			gintsts = readl(&reg->gintsts);
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_is_in, DWC2_UDC_OTG_GINTSTS=0x%x\n",
				   __func__, gintsts);
			setdma_tx(ep, req);
		} else {
			gintsts = readl(&reg->gintsts);
			debug_cond(DEBUG_OUT_EP != 0,
				   "%s:ep_is_out, DWC2_UDC_OTG_GINTSTS=0x%x\n",
				   __func__, gintsts);
			setdma_rx(ep, req);
		}
	}

	/* pio or dma irq handler advances the queue. */
	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
  549. /****************************************************************/
  550. /* End Point 0 related functions */
  551. /****************************************************************/
  552. /* return: 0 = still running, 1 = completed, negative = errno */
  553. static int write_fifo_ep0(struct dwc2_ep *ep, struct dwc2_request *req)
  554. {
  555. u32 max;
  556. unsigned count;
  557. int is_last;
  558. max = ep_maxpacket(ep);
  559. debug_cond(DEBUG_EP0 != 0, "%s: max = %d\n", __func__, max);
  560. count = setdma_tx(ep, req);
  561. /* last packet is usually short (or a zlp) */
  562. if (likely(count != max))
  563. is_last = 1;
  564. else {
  565. if (likely(req->req.length != req->req.actual + count)
  566. || req->req.zero)
  567. is_last = 0;
  568. else
  569. is_last = 1;
  570. }
  571. debug_cond(DEBUG_EP0 != 0,
  572. "%s: wrote %s %d bytes%s %d left %p\n", __func__,
  573. ep->ep.name, count,
  574. is_last ? "/L" : "",
  575. req->req.length - req->req.actual - count, req);
  576. /* requests complete when all IN data is in the FIFO */
  577. if (is_last) {
  578. ep->dev->ep0state = WAIT_FOR_SETUP;
  579. return 1;
  580. }
  581. return 0;
  582. }
  583. static int dwc2_fifo_read(struct dwc2_ep *ep, u32 *cp, int max)
  584. {
  585. invalidate_dcache_range((unsigned long)cp, (unsigned long)cp +
  586. ROUND(max, CONFIG_SYS_CACHELINE_SIZE));
  587. debug_cond(DEBUG_EP0 != 0,
  588. "%s: bytes=%d, ep_index=%d 0x%p\n", __func__,
  589. max, ep_index(ep), cp);
  590. return max;
  591. }
  592. /**
  593. * udc_set_address - set the USB address for this device
  594. * @address:
  595. *
  596. * Called from control endpoint function
  597. * after it decodes a set address setup packet.
  598. */
/**
 * udc_set_address - set the USB address for this device
 * @address:
 *
 * Called from control endpoint function
 * after it decodes a set address setup packet.
 *
 * Writes the address into DCFG, then immediately sends the status-
 * stage ZLP (the address takes effect after the status stage per the
 * USB spec, which the core handles).
 */
static void udc_set_address(struct dwc2_udc *dev, unsigned char address)
{
	u32 ctrl = readl(&reg->dcfg);
	writel(DEVICE_ADDRESS(address) | ctrl, &reg->dcfg);

	dwc2_udc_ep0_zlp(dev);

	debug_cond(DEBUG_EP0 != 0,
		   "%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n",
		   __func__, address, readl(&reg->dcfg));

	dev->usb_address = address;
}
/*
 * STALL endpoint 0 (protocol stall for an unsupported/failed control
 * request), then immediately re-arm for the next SETUP packet, which
 * is what clears the stall condition on EP0.
 */
static inline void dwc2_udc_ep0_set_stall(struct dwc2_ep *ep)
{
	struct dwc2_udc *dev;
	u32 ep_ctrl = 0;

	dev = ep->dev;
	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);

	/* set the disable and stall bits */
	if (ep_ctrl & DEPCTL_EPENA)
		ep_ctrl |= DEPCTL_EPDIS;

	ep_ctrl |= DEPCTL_STALL;

	writel(ep_ctrl, &reg->in_endp[EP0_CON].diepctl);

	debug_cond(DEBUG_EP0 != 0,
		   "%s: set ep%d stall, DIEPCTL0 = 0x%p\n",
		   __func__, ep_index(ep), &reg->in_endp[EP0_CON].diepctl);

	/*
	 * The application can only set this bit, and the core clears it,
	 * when a SETUP token is received for this endpoint
	 */
	dev->ep0state = WAIT_FOR_SETUP;
	dwc2_udc_pre_setup();
}
  630. static void dwc2_ep0_read(struct dwc2_udc *dev)
  631. {
  632. struct dwc2_request *req;
  633. struct dwc2_ep *ep = &dev->ep[0];
  634. if (!list_empty(&ep->queue)) {
  635. req = list_entry(ep->queue.next, struct dwc2_request, queue);
  636. } else {
  637. debug("%s: ---> BUG\n", __func__);
  638. BUG();
  639. return;
  640. }
  641. debug_cond(DEBUG_EP0 != 0,
  642. "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
  643. __func__, req, req->req.length, req->req.actual);
  644. if (req->req.length == 0) {
  645. /* zlp for Set_configuration, Set_interface,
  646. * or Bulk-Only mass storge reset */
  647. ep->len = 0;
  648. dwc2_udc_ep0_zlp(dev);
  649. debug_cond(DEBUG_EP0 != 0,
  650. "%s: req.length = 0, bRequest = %d\n",
  651. __func__, usb_ctrl->bRequest);
  652. return;
  653. }
  654. setdma_rx(ep, req);
  655. }
  656. /*
  657. * DATA_STATE_XMIT
  658. */
/*
 * DATA_STATE_XMIT
 *
 * Start (or continue) the IN data stage of a control transfer for the
 * request at the head of the EP0 queue.  Tracks whether an extra
 * zero-length packet will be needed when the payload is an exact
 * multiple of the EP0 FIFO size.  Returns 1 when a packet was queued,
 * 0 when the queue was empty.
 */
static int dwc2_ep0_write(struct dwc2_udc *dev)
{
	struct dwc2_request *req;
	struct dwc2_ep *ep = &dev->ep[0];
	int ret, need_zlp = 0;

	if (list_empty(&ep->queue))
		req = 0;
	else
		req = list_entry(ep->queue.next, struct dwc2_request, queue);

	if (!req) {
		debug_cond(DEBUG_EP0 != 0, "%s: NULL REQ\n", __func__);
		return 0;
	}

	debug_cond(DEBUG_EP0 != 0,
		   "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
		   __func__, req, req->req.length, req->req.actual);

	if (req->req.length - req->req.actual == ep0_fifo_size) {
		/* Next write will end with the packet size, */
		/* so we need Zero-length-packet */
		need_zlp = 1;
	}

	ret = write_fifo_ep0(ep, req);

	if ((ret == 1) && !need_zlp) {
		/* Last packet */
		dev->ep0state = WAIT_FOR_COMPLETE;
		debug_cond(DEBUG_EP0 != 0,
			   "%s: finished, waiting for status\n", __func__);
	} else {
		/* more packets (or the trailing ZLP) still to go */
		dev->ep0state = DATA_STATE_XMIT;
		debug_cond(DEBUG_EP0 != 0,
			   "%s: not finished\n", __func__);
	}

	return 1;
}
/*
 * Answer a GET_STATUS control request for device, interface, or
 * endpoint recipients.  Builds the 2-byte status word in the shared
 * usb_ctrl buffer, flushes it for DMA, and arms EP0 IN to send it.
 * Returns 0 on success (state advances to WAIT_FOR_NULL_COMPLETE,
 * i.e. the status OUT stage follows in complete_tx()), 1 when the
 * request should be stalled.
 */
static int dwc2_udc_get_status(struct dwc2_udc *dev,
			       struct usb_ctrlrequest *crq)
{
	u8 ep_num = crq->wIndex & 0x7F;	/* strip the direction bit */
	u16 g_status = 0;
	u32 ep_ctrl;

	debug_cond(DEBUG_SETUP != 0,
		   "%s: *** USB_REQ_GET_STATUS\n", __func__);
	printf("crq->brequest:0x%x\n", crq->bRequestType & USB_RECIP_MASK);

	switch (crq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		/* interface status is always zero per USB 2.0 ch. 9.4.5 */
		g_status = 0;
		debug_cond(DEBUG_SETUP != 0,
			   "\tGET_STATUS:USB_RECIP_INTERFACE, g_stauts = %d\n",
			   g_status);
		break;

	case USB_RECIP_DEVICE:
		g_status = 0x1; /* Self powered */
		debug_cond(DEBUG_SETUP != 0,
			   "\tGET_STATUS: USB_RECIP_DEVICE, g_stauts = %d\n",
			   g_status);
		break;

	case USB_RECIP_ENDPOINT:
		if (crq->wLength > 2) {
			debug_cond(DEBUG_SETUP != 0,
				   "\tGET_STATUS:Not support EP or wLength\n");
			return 1;
		}

		/* report the halt status of the addressed endpoint */
		g_status = dev->ep[ep_num].stopped;
		debug_cond(DEBUG_SETUP != 0,
			   "\tGET_STATUS: USB_RECIP_ENDPOINT, g_stauts = %d\n",
			   g_status);
		break;

	default:
		return 1;
	}

	memcpy(usb_ctrl, &g_status, sizeof(g_status));

	/* push the reply out of the cache so DMA reads fresh data */
	flush_dcache_range((unsigned long) usb_ctrl,
			   (unsigned long) usb_ctrl +
			   ROUND(sizeof(g_status), CONFIG_SYS_CACHELINE_SIZE));

	writel(usb_ctrl_dma_addr, &reg->in_endp[EP0_CON].diepdma);
	writel(DIEPT_SIZ_PKT_CNT(1) | DIEPT_SIZ_XFER_SIZE(2),
	       &reg->in_endp[EP0_CON].dieptsiz);

	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->in_endp[EP0_CON].diepctl);
	dev->ep0state = WAIT_FOR_NULL_COMPLETE;

	return 0;
}
  742. static void dwc2_udc_set_nak(struct dwc2_ep *ep)
  743. {
  744. u8 ep_num;
  745. u32 ep_ctrl = 0;
  746. ep_num = ep_index(ep);
  747. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  748. if (ep_is_in(ep)) {
  749. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  750. ep_ctrl |= DEPCTL_SNAK;
  751. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  752. debug("%s: set NAK, DIEPCTL%d = 0x%x\n",
  753. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  754. } else {
  755. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  756. ep_ctrl |= DEPCTL_SNAK;
  757. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  758. debug("%s: set NAK, DOEPCTL%d = 0x%x\n",
  759. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  760. }
  761. return;
  762. }
  763. static void dwc2_udc_ep_set_stall(struct dwc2_ep *ep)
  764. {
  765. u8 ep_num;
  766. u32 ep_ctrl = 0;
  767. ep_num = ep_index(ep);
  768. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  769. if (ep_is_in(ep)) {
  770. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  771. /* set the disable and stall bits */
  772. if (ep_ctrl & DEPCTL_EPENA)
  773. ep_ctrl |= DEPCTL_EPDIS;
  774. ep_ctrl |= DEPCTL_STALL;
  775. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  776. debug("%s: set stall, DIEPCTL%d = 0x%x\n",
  777. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  778. } else {
  779. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  780. /* set the stall bit */
  781. ep_ctrl |= DEPCTL_STALL;
  782. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  783. debug("%s: set stall, DOEPCTL%d = 0x%x\n",
  784. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  785. }
  786. return;
  787. }
  788. static void dwc2_udc_ep_clear_stall(struct dwc2_ep *ep)
  789. {
  790. u8 ep_num;
  791. u32 ep_ctrl = 0;
  792. ep_num = ep_index(ep);
  793. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  794. if (ep_is_in(ep)) {
  795. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  796. /* clear stall bit */
  797. ep_ctrl &= ~DEPCTL_STALL;
  798. /*
  799. * USB Spec 9.4.5: For endpoints using data toggle, regardless
  800. * of whether an endpoint has the Halt feature set, a
  801. * ClearFeature(ENDPOINT_HALT) request always results in the
  802. * data toggle being reinitialized to DATA0.
  803. */
  804. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  805. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  806. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  807. }
  808. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  809. debug("%s: cleared stall, DIEPCTL%d = 0x%x\n",
  810. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  811. } else {
  812. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  813. /* clear stall bit */
  814. ep_ctrl &= ~DEPCTL_STALL;
  815. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  816. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  817. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  818. }
  819. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  820. debug("%s: cleared stall, DOEPCTL%d = 0x%x\n",
  821. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  822. }
  823. return;
  824. }
  825. static int dwc2_udc_set_halt(struct usb_ep *_ep, int value)
  826. {
  827. struct dwc2_ep *ep;
  828. struct dwc2_udc *dev;
  829. unsigned long flags = 0;
  830. u8 ep_num;
  831. ep = container_of(_ep, struct dwc2_ep, ep);
  832. ep_num = ep_index(ep);
  833. if (unlikely(!_ep || !ep->desc || ep_num == EP0_CON ||
  834. ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) {
  835. debug("%s: %s bad ep or descriptor\n", __func__, ep->ep.name);
  836. return -EINVAL;
  837. }
  838. /* Attempt to halt IN ep will fail if any transfer requests
  839. * are still queue */
  840. if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
  841. debug("%s: %s queue not empty, req = %p\n",
  842. __func__, ep->ep.name,
  843. list_entry(ep->queue.next, struct dwc2_request, queue));
  844. return -EAGAIN;
  845. }
  846. dev = ep->dev;
  847. debug("%s: ep_num = %d, value = %d\n", __func__, ep_num, value);
  848. spin_lock_irqsave(&dev->lock, flags);
  849. if (value == 0) {
  850. ep->stopped = 0;
  851. dwc2_udc_ep_clear_stall(ep);
  852. } else {
  853. if (ep_num == 0)
  854. dev->ep0state = WAIT_FOR_SETUP;
  855. ep->stopped = 1;
  856. dwc2_udc_ep_set_stall(ep);
  857. }
  858. spin_unlock_irqrestore(&dev->lock, flags);
  859. return 0;
  860. }
  861. static void dwc2_udc_ep_activate(struct dwc2_ep *ep)
  862. {
  863. u8 ep_num;
  864. u32 ep_ctrl = 0, daintmsk = 0;
  865. ep_num = ep_index(ep);
  866. /* Read DEPCTLn register */
  867. if (ep_is_in(ep)) {
  868. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  869. daintmsk = 1 << ep_num;
  870. } else {
  871. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  872. daintmsk = (1 << ep_num) << DAINT_OUT_BIT;
  873. }
  874. debug("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n",
  875. __func__, ep_num, ep_ctrl, ep_is_in(ep));
  876. /* If the EP is already active don't change the EP Control
  877. * register. */
  878. if (!(ep_ctrl & DEPCTL_USBACTEP)) {
  879. ep_ctrl = (ep_ctrl & ~DEPCTL_TYPE_MASK) |
  880. (ep->bmAttributes << DEPCTL_TYPE_BIT);
  881. ep_ctrl = (ep_ctrl & ~DEPCTL_MPS_MASK) |
  882. (ep->ep.maxpacket << DEPCTL_MPS_BIT);
  883. ep_ctrl |= (DEPCTL_SETD0PID | DEPCTL_USBACTEP | DEPCTL_SNAK);
  884. if (ep_is_in(ep)) {
  885. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  886. debug("%s: USB Ative EP%d, DIEPCTRL%d = 0x%x\n",
  887. __func__, ep_num, ep_num,
  888. readl(&reg->in_endp[ep_num].diepctl));
  889. } else {
  890. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  891. debug("%s: USB Ative EP%d, DOEPCTRL%d = 0x%x\n",
  892. __func__, ep_num, ep_num,
  893. readl(&reg->out_endp[ep_num].doepctl));
  894. }
  895. }
  896. /* Unmask EP Interrtupt */
  897. writel(readl(&reg->daintmsk)|daintmsk, &reg->daintmsk);
  898. debug("%s: DAINTMSK = 0x%x\n", __func__, readl(&reg->daintmsk));
  899. }
  900. static int dwc2_udc_clear_feature(struct usb_ep *_ep)
  901. {
  902. struct dwc2_udc *dev;
  903. struct dwc2_ep *ep;
  904. u8 ep_num;
  905. ep = container_of(_ep, struct dwc2_ep, ep);
  906. ep_num = ep_index(ep);
  907. dev = ep->dev;
  908. debug_cond(DEBUG_SETUP != 0,
  909. "%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n",
  910. __func__, ep_num, ep_is_in(ep), clear_feature_flag);
  911. if (usb_ctrl->wLength != 0) {
  912. debug_cond(DEBUG_SETUP != 0,
  913. "\tCLEAR_FEATURE: wLength is not zero.....\n");
  914. return 1;
  915. }
  916. switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
  917. case USB_RECIP_DEVICE:
  918. switch (usb_ctrl->wValue) {
  919. case USB_DEVICE_REMOTE_WAKEUP:
  920. debug_cond(DEBUG_SETUP != 0,
  921. "\tOFF:USB_DEVICE_REMOTE_WAKEUP\n");
  922. break;
  923. case USB_DEVICE_TEST_MODE:
  924. debug_cond(DEBUG_SETUP != 0,
  925. "\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n");
  926. /** @todo Add CLEAR_FEATURE for TEST modes. */
  927. break;
  928. }
  929. dwc2_udc_ep0_zlp(dev);
  930. break;
  931. case USB_RECIP_ENDPOINT:
  932. debug_cond(DEBUG_SETUP != 0,
  933. "\tCLEAR_FEATURE:USB_RECIP_ENDPOINT, wValue = %d\n",
  934. usb_ctrl->wValue);
  935. if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
  936. if (ep_num == 0) {
  937. dwc2_udc_ep0_set_stall(ep);
  938. return 0;
  939. }
  940. dwc2_udc_ep0_zlp(dev);
  941. dwc2_udc_ep_clear_stall(ep);
  942. dwc2_udc_ep_activate(ep);
  943. ep->stopped = 0;
  944. clear_feature_num = ep_num;
  945. clear_feature_flag = 1;
  946. }
  947. break;
  948. }
  949. return 0;
  950. }
  951. static int dwc2_udc_set_feature(struct usb_ep *_ep)
  952. {
  953. struct dwc2_udc *dev;
  954. struct dwc2_ep *ep;
  955. u8 ep_num;
  956. ep = container_of(_ep, struct dwc2_ep, ep);
  957. ep_num = ep_index(ep);
  958. dev = ep->dev;
  959. debug_cond(DEBUG_SETUP != 0,
  960. "%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n",
  961. __func__, ep_num);
  962. if (usb_ctrl->wLength != 0) {
  963. debug_cond(DEBUG_SETUP != 0,
  964. "\tSET_FEATURE: wLength is not zero.....\n");
  965. return 1;
  966. }
  967. switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
  968. case USB_RECIP_DEVICE:
  969. switch (usb_ctrl->wValue) {
  970. case USB_DEVICE_REMOTE_WAKEUP:
  971. debug_cond(DEBUG_SETUP != 0,
  972. "\tSET_FEATURE:USB_DEVICE_REMOTE_WAKEUP\n");
  973. break;
  974. case USB_DEVICE_B_HNP_ENABLE:
  975. debug_cond(DEBUG_SETUP != 0,
  976. "\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
  977. break;
  978. case USB_DEVICE_A_HNP_SUPPORT:
  979. /* RH port supports HNP */
  980. debug_cond(DEBUG_SETUP != 0,
  981. "\tSET_FEATURE:USB_DEVICE_A_HNP_SUPPORT\n");
  982. break;
  983. case USB_DEVICE_A_ALT_HNP_SUPPORT:
  984. /* other RH port does */
  985. debug_cond(DEBUG_SETUP != 0,
  986. "\tSET: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
  987. break;
  988. }
  989. dwc2_udc_ep0_zlp(dev);
  990. return 0;
  991. case USB_RECIP_INTERFACE:
  992. debug_cond(DEBUG_SETUP != 0,
  993. "\tSET_FEATURE: USB_RECIP_INTERFACE\n");
  994. break;
  995. case USB_RECIP_ENDPOINT:
  996. debug_cond(DEBUG_SETUP != 0,
  997. "\tSET_FEATURE: USB_RECIP_ENDPOINT\n");
  998. if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
  999. if (ep_num == 0) {
  1000. dwc2_udc_ep0_set_stall(ep);
  1001. return 0;
  1002. }
  1003. ep->stopped = 1;
  1004. dwc2_udc_ep_set_stall(ep);
  1005. }
  1006. dwc2_udc_ep0_zlp(dev);
  1007. return 0;
  1008. }
  1009. return 1;
  1010. }
  1011. /*
  1012. * WAIT_FOR_SETUP (OUT_PKT_RDY)
  1013. */
/*
 * WAIT_FOR_SETUP (OUT_PKT_RDY)
 *
 * Read the 8-byte SETUP packet for EP0 from the FIFO into usb_ctrl,
 * handle a subset of standard requests in the driver itself
 * (SET_ADDRESS, GET_STATUS, CLEAR/SET_FEATURE, ...), and forward the
 * rest to the bound gadget driver's setup() callback.
 * Called with dev->lock held; the lock is dropped around the gadget
 * callback.
 */
static void dwc2_ep0_setup(struct dwc2_udc *dev)
{
	struct dwc2_ep *ep = &dev->ep[0];
	int i;
	u8 ep_num;

	/* Nuke all previous transfers */
	nuke(ep, -EPROTO);

	/* read control req from fifo (8 bytes) */
	dwc2_fifo_read(ep, (u32 *)usb_ctrl, 8);

	debug_cond(DEBUG_SETUP != 0,
		   "%s: bRequestType = 0x%x(%s), bRequest = 0x%x"
		   "\twLength = 0x%x, wValue = 0x%x, wIndex= 0x%x\n",
		   __func__, usb_ctrl->bRequestType,
		   (usb_ctrl->bRequestType & USB_DIR_IN) ? "IN" : "OUT",
		   usb_ctrl->bRequest,
		   usb_ctrl->wLength, usb_ctrl->wValue, usb_ctrl->wIndex);

#ifdef DEBUG
	{
		/*
		 * Hex dump of the raw SETUP packet. NOTE(review): this
		 * debug-only 'i' intentionally shadows the outer 'i'.
		 */
		int i, len = sizeof(*usb_ctrl);
		char *p = (char *)usb_ctrl;

		printf("pkt = ");
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)p)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	/* Mass-storage class requests with malformed lengths are stalled */
	if (usb_ctrl->bRequest == GET_MAX_LUN_REQUEST &&
	    usb_ctrl->wLength != 1) {
		debug_cond(DEBUG_SETUP != 0,
			   "\t%s:GET_MAX_LUN_REQUEST:invalid",
			   __func__);
		debug_cond(DEBUG_SETUP != 0,
			   "wLength = %d, setup returned\n",
			   usb_ctrl->wLength);

		dwc2_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	} else if (usb_ctrl->bRequest == BOT_RESET_REQUEST &&
		 usb_ctrl->wLength != 0) {
		/* Bulk-Only mass storage reset class-specific request */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:BOT Rest:invalid wLength =%d, setup returned\n",
			   __func__, usb_ctrl->wLength);

		dwc2_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	}

	/* Set direction of EP0 from bit 7 of bmRequestType */
	if (likely(usb_ctrl->bRequestType & USB_DIR_IN)) {
		ep->bEndpointAddress |= USB_DIR_IN;
	} else {
		ep->bEndpointAddress &= ~USB_DIR_IN;
	}
	/* cope with automagic for some standard requests. */
	dev->req_std = (usb_ctrl->bRequestType & USB_TYPE_MASK)
		== USB_TYPE_STANDARD;

	dev->req_pending = 1;

	/* Handle some SETUP packets ourselves */
	if (dev->req_std) {
		switch (usb_ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_ADDRESS (%d)\n",
				   __func__, usb_ctrl->wValue);
			/* only the standard device-recipient form is valid */
			if (usb_ctrl->bRequestType
				!= (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
				break;

			udc_set_address(dev, usb_ctrl->wValue);
			return;

		case USB_REQ_SET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "=====================================\n");
			debug_cond(DEBUG_SETUP != 0,
				   "%s: USB_REQ_SET_CONFIGURATION (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_DEVICE)
				reset_available = 1;

			break;

		case USB_REQ_GET_DESCRIPTOR:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_DESCRIPTOR\n",
				   __func__);
			break;

		case USB_REQ_SET_INTERFACE:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_INTERFACE (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_INTERFACE)
				reset_available = 1;

			break;

		case USB_REQ_GET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_CONFIGURATION\n",
				   __func__);
			break;

		case USB_REQ_GET_STATUS:
			if (!dwc2_udc_get_status(dev, usb_ctrl))
				return;

			break;

		case USB_REQ_CLEAR_FEATURE:
			/*
			 * NOTE(review): ep_num is masked to 0x7f but not
			 * bounds-checked against the dev->ep[] array size —
			 * confirm a host cannot address a nonexistent EP.
			 */
			ep_num = usb_ctrl->wIndex & 0x7f;

			if (!dwc2_udc_clear_feature(&dev->ep[ep_num].ep))
				return;

			break;

		case USB_REQ_SET_FEATURE:
			/* NOTE(review): same unvalidated ep_num as above */
			ep_num = usb_ctrl->wIndex & 0x7f;

			if (!dwc2_udc_set_feature(&dev->ep[ep_num].ep))
				return;

			break;

		default:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** Default of usb_ctrl->bRequest=0x%x"
				   "happened.\n", __func__, usb_ctrl->bRequest);
			break;
		}
	}

	if (likely(dev->driver)) {
		/* device-2-host (IN) or no data setup command,
		 * process immediately */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:usb_ctrlreq will be passed to fsg_setup()\n",
			    __func__);

		/* drop the lock around the gadget driver callback */
		spin_unlock(&dev->lock);
		i = dev->driver->setup(&dev->gadget, usb_ctrl);
		spin_lock(&dev->lock);

		if (i < 0) {
			/* setup processing failed, force stall */
			dwc2_udc_ep0_set_stall(ep);
			dev->ep0state = WAIT_FOR_SETUP;

			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->driver->setup failed (%d),"
				   " bRequest = %d\n",
				   i, usb_ctrl->bRequest);


		} else if (dev->req_pending) {
			dev->req_pending = 0;
			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->req_pending...\n");
		}

		debug_cond(DEBUG_SETUP != 0,
			   "\tep0state = %s\n", state_names[dev->ep0state]);

	}
}
  1159. /*
  1160. * handle ep0 interrupt
  1161. */
  1162. static void dwc2_handle_ep0(struct dwc2_udc *dev)
  1163. {
  1164. if (dev->ep0state == WAIT_FOR_SETUP) {
  1165. debug_cond(DEBUG_OUT_EP != 0,
  1166. "%s: WAIT_FOR_SETUP\n", __func__);
  1167. dwc2_ep0_setup(dev);
  1168. } else {
  1169. debug_cond(DEBUG_OUT_EP != 0,
  1170. "%s: strange state!!(state = %s)\n",
  1171. __func__, state_names[dev->ep0state]);
  1172. }
  1173. }
  1174. static void dwc2_ep0_kick(struct dwc2_udc *dev, struct dwc2_ep *ep)
  1175. {
  1176. debug_cond(DEBUG_EP0 != 0,
  1177. "%s: ep_is_in = %d\n", __func__, ep_is_in(ep));
  1178. if (ep_is_in(ep)) {
  1179. dev->ep0state = DATA_STATE_XMIT;
  1180. dwc2_ep0_write(dev);
  1181. } else {
  1182. dev->ep0state = DATA_STATE_RECV;
  1183. dwc2_ep0_read(dev);
  1184. }
  1185. }