  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
  4. *
  5. * 2013 (c) Aeroflex Gaisler AB
  6. *
  7. * This driver supports GRUSBDC USB Device Controller cores available in the
  8. * GRLIB VHDL IP core library.
  9. *
  10. * Full documentation of the GRUSBDC core can be found here:
  11. * https://www.gaisler.com/products/grlib/grip.pdf
  12. *
  13. * Contributors:
  14. * - Andreas Larsson <andreas@gaisler.com>
  15. * - Marko Isomaki
  16. */
  17. /*
  18. * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
  19. * individually configurable to any of the four USB transfer types. This driver
  20. * only supports cores in DMA mode.
  21. */
  22. #include <linux/kernel.h>
  23. #include <linux/module.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/slab.h>
  26. #include <linux/spinlock.h>
  27. #include <linux/errno.h>
  28. #include <linux/list.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/device.h>
  31. #include <linux/usb.h>
  32. #include <linux/usb/ch9.h>
  33. #include <linux/usb/gadget.h>
  34. #include <linux/dma-mapping.h>
  35. #include <linux/dmapool.h>
  36. #include <linux/debugfs.h>
  37. #include <linux/seq_file.h>
  38. #include <linux/of.h>
  39. #include <asm/byteorder.h>
  40. #include "gr_udc.h"
#define DRIVER_NAME "gr_udc"
#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;

/* Register accessors: the core's register file is accessed big-endian */
#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl) \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	GR_EPCTRL_BUFSZ_SCALER)

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

/* Transfer type names, indexed by the epctrl TT field (see gr_seq_ep_show) */
static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
  57. static const char *gr_ep0state_string(enum gr_ep0state state)
  58. {
  59. static const char *const names[] = {
  60. [GR_EP0_DISCONNECT] = "disconnect",
  61. [GR_EP0_SETUP] = "setup",
  62. [GR_EP0_IDATA] = "idata",
  63. [GR_EP0_ODATA] = "odata",
  64. [GR_EP0_ISTATUS] = "istatus",
  65. [GR_EP0_OSTATUS] = "ostatus",
  66. [GR_EP0_STALL] = "stall",
  67. [GR_EP0_SUSPEND] = "suspend",
  68. };
  69. if (state < 0 || state >= ARRAY_SIZE(names))
  70. return "UNKNOWN";
  71. return names[state];
  72. }
#ifdef VERBOSE_DEBUG
/*
 * Dump (part of) a request's data buffer to the debug log: the requested
 * length for IN transfers, the bytes actually received for OUT transfers.
 * Only the first 32 bytes are hex-dumped.
 */
static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

/* Log the raw fields of a control (setup) request */
static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */
/* Compiled-out stubs when VERBOSE_DEBUG is not defined */
static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}
static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}
#endif /* VERBOSE_DEBUG */
  97. /* ---------------------------------------------------------------------- */
  98. /* Debugfs functionality */
  99. #ifdef CONFIG_USB_GADGET_DEBUG_FS
/* Print the state of one endpoint: registers, flags and queued requests */
static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, " dma_start = %d\n", ep->dma_start);
	seq_printf(seq, " stopped = %d\n", ep->stopped);
	seq_printf(seq, " wedged = %d\n", ep->wedged);
	seq_printf(seq, " callback = %d\n", ep->callback);
	seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	/* The NT field is only printed for modes 1 and 3 ("iso" and "int") */
	if (mode == 1 || mode == 3)
		seq_printf(seq, " nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
	/* GR_EPSTAT_BS indicates which of the two hardware buffers is selected */
	seq_printf(seq, " Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, " Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, " Queue: empty\n\n");
		return;
	}

	seq_puts(seq, " Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		/*
		 * Walk the descriptor chain; 'c' marks the current descriptor.
		 * NOTE(review): assumes req->first_desc is non-NULL for queued
		 * requests (guaranteed by gr_queue's setup) — the do/while
		 * dereferences it unconditionally.
		 */
		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}

	seq_puts(seq, "\n");
}
/* debugfs show: dump overall controller state followed by every endpoint */
static int gr_dfs_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(gr_dfs);
/* Create <usb_debug_root>/<device>/gr_udc_state in debugfs */
static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";
	struct dentry *root;

	/* debugfs calls need no error checking by kernel convention */
	root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
	debugfs_create_file(name, 0444, root, dev, &gr_dfs_fops);
}

/* Remove the debugfs directory created by gr_dfs_create() */
static void gr_dfs_delete(struct gr_udc *dev)
{
	debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

/* No-op stubs when debugfs support is configured out */
static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */
  186. /* ---------------------------------------------------------------------- */
  187. /* DMA and request handling */
  188. /* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
  189. static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
  190. {
  191. dma_addr_t paddr;
  192. struct gr_dma_desc *dma_desc;
  193. dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
  194. if (!dma_desc) {
  195. dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
  196. return NULL;
  197. }
  198. dma_desc->paddr = paddr;
  199. return dma_desc;
  200. }
/* Return a single descriptor to the device's DMA pool */
static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}
  206. /* Frees the chain of struct gr_dma_desc for the given request */
  207. static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
  208. {
  209. struct gr_dma_desc *desc;
  210. struct gr_dma_desc *next;
  211. next = req->first_desc;
  212. if (!next)
  213. return;
  214. do {
  215. desc = next;
  216. next = desc->next_desc;
  217. gr_free_dma_desc(dev, desc);
  218. } while (desc != req->last_desc);
  219. req->first_desc = NULL;
  220. req->curr_desc = NULL;
  221. req->last_desc = NULL;
  222. }
  223. static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	/* Unlink from the endpoint queue and settle the final status */
	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
		req->req.actual = req->req.length;
	} else if (req->oddlen && req->req.actual > req->evenlen) {
		/*
		 * Copy to user buffer in this case where length was not evenly
		 * divisible by ep->ep.maxpacket and the last descriptor was
		 * actually used.
		 */
		char *buftail = ((char *)req->req.buf + req->evenlen);

		memcpy(buftail, ep->tailbuf, req->oddlen);

		if (req->req.actual > req->req.length) {
			/* We got more data than was requested */
			dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
				ep->ep.name);
			gr_dbgprint_request("OVFL", ep, req);
			req->req.status = -EOVERFLOW;
		}
	}

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		/* Internal ep0 OUT request: feed the setup packet handler */
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		/* Drop the lock around the gadget completion callback */
		spin_unlock(&dev->lock);
		usb_gadget_giveback_request(&ep->ep, &req->req);
		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}
  283. static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
  284. {
  285. struct gr_request *req;
  286. req = kzalloc(sizeof(*req), gfp_flags);
  287. if (!req)
  288. return NULL;
  289. INIT_LIST_HEAD(&req->queue);
  290. return &req->req;
  291. }
/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly
	 * long packet are received. Therefore an internal bounce buffer gets
	 * used when such a request gets enabled.
	 */
	if (!ep->is_in && req->oddlen)
		req->last_desc->data = ep->tailbuf_paddr;

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}
  324. /*
  325. * Finishes the first request in the ep's queue and, if available, starts the
  326. * next request in queue.
  327. *
  328. * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
  329. */
  330. static void gr_dma_advance(struct gr_ep *ep, int status)
  331. {
  332. struct gr_request *req;
  333. req = list_first_entry(&ep->queue, struct gr_request, queue);
  334. gr_finish_request(ep, req, status);
  335. gr_start_dma(ep); /* Regardless of ep->dma_start */
  336. }
  337. /*
  338. * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
  339. * transfer to be canceled and clears GR_DMACTRL_DA.
  340. *
  341. * Must be called with dev->lock held.
  342. */
  343. static void gr_abort_dma(struct gr_ep *ep)
  344. {
  345. u32 dmactrl;
  346. dmactrl = gr_read32(&ep->regs->dmactrl);
  347. gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
  348. }
  349. /*
  350. * Allocates and sets up a struct gr_dma_desc and putting it on the descriptor
  351. * chain.
  352. *
  353. * Size is not used for OUT endpoints. Hardware can not be instructed to handle
  354. * smaller buffer than MAXPL in the OUT direction.
  355. */
  356. static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
  357. dma_addr_t data, unsigned size, gfp_t gfp_flags)
  358. {
  359. struct gr_dma_desc *desc;
  360. desc = gr_alloc_dma_desc(ep, gfp_flags);
  361. if (!desc)
  362. return -ENOMEM;
  363. desc->data = data;
  364. if (ep->is_in)
  365. desc->ctrl =
  366. (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
  367. else
  368. desc->ctrl = GR_DESC_OUT_CTRL_IE;
  369. if (!req->first_desc) {
  370. req->first_desc = desc;
  371. req->curr_desc = desc;
  372. } else {
  373. req->last_desc->next_desc = desc;
  374. req->last_desc->next = desc->paddr;
  375. req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
  376. }
  377. req->last_desc = desc;
  378. return 0;
  379. }
/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together covers req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		if (size < ep->bytes_per_buffer) {
			/*
			 * Prepare using bounce buffer: remember where the even
			 * part of the buffer ends and how long the odd tail is
			 * so gr_finish_request can copy the tail back.
			 */
			req->evenlen = req->req.length - bytes_left;
			req->oddlen = size;
		}

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	/* Enable only the first descriptor; the irq handler enables the rest */
	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together covers req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers)
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is even
	 * multiples of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
/*
 * Validate a request, map it for DMA, build its descriptor chain and put it
 * on the endpoint queue, starting DMA if the endpoint is idle.
 *
 * Must be called with dev->lock held.
 */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	/* Except for ep0, an endpoint must have been enabled (have a desc) */
	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	/* A request needs a buffer and must not already be queued */
	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	/* Build the direction-specific descriptor chain */
	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		/* NOTE(review): the request stays DMA-mapped on this error
		 * path; confirm callers tolerate that before changing it */
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}
  516. /*
  517. * Queue a request from within the driver.
  518. *
  519. * Must be called with dev->lock held.
  520. */
  521. static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
  522. gfp_t gfp_flags)
  523. {
  524. if (ep->is_in)
  525. gr_dbgprint_request("RESP", ep, req);
  526. return gr_queue(ep, req, gfp_flags);
  527. }
  528. /* ---------------------------------------------------------------------- */
  529. /* General helper functions */
  530. /*
  531. * Dequeue ALL requests.
  532. *
  533. * Must be called with dev->lock held and irqs disabled.
  534. */
  535. static void gr_ep_nuke(struct gr_ep *ep)
  536. {
  537. struct gr_request *req;
  538. ep->stopped = 1;
  539. ep->dma_start = 0;
  540. gr_abort_dma(ep);
  541. while (!list_empty(&ep->queue)) {
  542. req = list_first_entry(&ep->queue, struct gr_request, queue);
  543. gr_finish_request(ep, req, -ESHUTDOWN);
  544. }
  545. }
/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	/* Clear all endpoint control and DMA control bits */
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL; /* Marks the endpoint as not enabled */
	ep->stopped = 1;
	ep->dma_start = 0;
}
/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	/* Set GR_EPCTRL_CS on both directions of endpoint 0 */
	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}
/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * @halt:     nonzero to set halt, zero to clear it
 * @wedge:    additionally mark the endpoint wedged when halting
 * @fromhost: nonzero when the request comes from the host (control request)
 *            rather than from the gadget driver
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	/* A non-ep0 endpoint must be enabled to be (un)halted */
	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	/* Halting an isochronous endpoint is not supported */
	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}
  616. /* Must be called with dev->lock held */
  617. static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
  618. {
  619. if (dev->ep0state != value)
  620. dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
  621. gr_ep0state_string(value));
  622. dev->ep0state = value;
  623. }
/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	/* Clearing the whole control register drops irq enables and pullup */
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}
  635. /*
  636. * Stop all device activity and disable data line pullup.
  637. *
  638. * Must be called with dev->lock held and irqs disabled.
  639. */
  640. static void gr_stop_activity(struct gr_udc *dev)
  641. {
  642. struct gr_ep *ep;
  643. list_for_each_entry(ep, &dev->ep_list, ep_list)
  644. gr_ep_nuke(ep);
  645. gr_disable_interrupts_and_pullup(dev);
  646. gr_set_ep0state(dev, GR_EP0_DISCONNECT);
  647. usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
  648. }
  649. /* ---------------------------------------------------------------------- */
  650. /* ep0 setup packet handling */
  651. static void gr_ep0_testmode_complete(struct usb_ep *_ep,
  652. struct usb_request *_req)
  653. {
  654. struct gr_ep *ep;
  655. struct gr_udc *dev;
  656. u32 control;
  657. ep = container_of(_ep, struct gr_ep, ep);
  658. dev = ep->dev;
  659. spin_lock(&dev->lock);
  660. control = gr_read32(&dev->regs->control);
  661. control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
  662. gr_write32(&dev->regs->control, control);
  663. spin_unlock(&dev->lock);
  664. }
  665. static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
  666. {
  667. /* Nothing needs to be done here */
  668. }
  669. /*
  670. * Queue a response on ep0in.
  671. *
  672. * Must be called with dev->lock held.
  673. */
  674. static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
  675. void (*complete)(struct usb_ep *ep,
  676. struct usb_request *req))
  677. {
  678. u8 *reqbuf = dev->ep0reqi->req.buf;
  679. int status;
  680. int i;
  681. for (i = 0; i < length; i++)
  682. reqbuf[i] = buf[i];
  683. dev->ep0reqi->req.length = length;
  684. dev->ep0reqi->req.complete = complete;
  685. status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
  686. if (status < 0)
  687. dev_err(dev->dev,
  688. "Could not queue ep0in setup response: %d\n", status);
  689. return status;
  690. }
  691. /*
  692. * Queue a 2 byte response on ep0in.
  693. *
  694. * Must be called with dev->lock held.
  695. */
  696. static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
  697. {
  698. __le16 le_response = cpu_to_le16(response);
  699. return gr_ep0_respond(dev, (u8 *)&le_response, 2,
  700. gr_ep0_dummy_complete);
  701. }
  702. /*
  703. * Queue a ZLP response on ep0in.
  704. *
  705. * Must be called with dev->lock held.
  706. */
  707. static inline int gr_ep0_respond_empty(struct gr_udc *dev)
  708. {
  709. return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
  710. }
  711. /*
  712. * This is run when a SET_ADDRESS request is received. First writes
  713. * the new address to the control register which is updated internally
  714. * when the next IN packet is ACKED.
  715. *
  716. * Must be called with dev->lock held.
  717. */
  718. static void gr_set_address(struct gr_udc *dev, u8 address)
  719. {
  720. u32 control;
  721. control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
  722. control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
  723. control |= GR_CONTROL_SU;
  724. gr_write32(&dev->regs->control, control);
  725. }
/*
 * Handle a standard request with device recipient (USB 2.0 ch. 9.4).
 *
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		/* Address 0 returns the device to the default state */
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support USB_TEST_FORCE_ENABLE */
			/* Test selector is carried in the high byte of wIndex */
			test = index >> 8;
			if (test >= USB_TEST_J && test <= USB_TEST_PACKET) {
				dev->test_mode = test;
				/* Mode is activated after the status stage */
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}
  777. /*
  778. * Returns negative for STALL, 0 for successful handling and positive for
  779. * delegation.
  780. *
  781. * Must be called with dev->lock held.
  782. */
  783. static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
  784. u16 value, u16 index)
  785. {
  786. if (dev->gadget.state != USB_STATE_CONFIGURED)
  787. return -1;
  788. /*
  789. * Should return STALL for invalid interfaces, but udc driver does not
  790. * know anything about that. However, many gadget drivers do not handle
  791. * GET_STATUS so we need to take care of that.
  792. */
  793. switch (request) {
  794. case USB_REQ_GET_STATUS:
  795. return gr_ep0_respond_u16(dev, 0x0000);
  796. case USB_REQ_SET_FEATURE:
  797. case USB_REQ_CLEAR_FEATURE:
  798. /*
  799. * No possible valid standard requests. Still let gadget drivers
  800. * have a go at it.
  801. */
  802. break;
  803. }
  804. return 1; /* Delegate the rest */
  805. }
/*
 * Handle a standard request with endpoint recipient.
 *
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	/* wIndex encodes the endpoint number and direction */
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	/* STALL requests that address endpoints this hardware lacks */
	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	/* Only ep0 requests are valid before the device is configured */
	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		/* Report the halt feature straight from the hardware bit */
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			/* Host-initiated halt; ack with a ZLP on success */
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			/* A wedged endpoint must not be unhalted by the host */
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}
  852. /* Must be called with dev->lock held */
  853. static void gr_ep0out_requeue(struct gr_udc *dev)
  854. {
  855. int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);
  856. if (ret)
  857. dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
  858. ret);
  859. }
/*
 * The main function dealing with setup requests on ep0.
 *
 * Decodes the 8-byte SETUP packet, dispatches standard requests to the
 * per-recipient handlers, delegates everything else to the gadget driver
 * (with the lock temporarily dropped), and advances the ep0 state machine.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	/* Lets the 8 raw setup bytes be viewed as a usb_ctrlrequest */
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		/* Dispatch on the recipient encoded in bRequestType */
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		/* Drop the lock while calling out to the gadget driver */
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OK:s it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}
  973. /* ---------------------------------------------------------------------- */
  974. /* VBUS and USB reset handling */
  975. /* Must be called with dev->lock held and irqs disabled */
  976. static void gr_vbus_connected(struct gr_udc *dev, u32 status)
  977. {
  978. u32 control;
  979. dev->gadget.speed = GR_SPEED(status);
  980. usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);
  981. /* Turn on full interrupts and pullup */
  982. control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
  983. GR_CONTROL_SP | GR_CONTROL_EP);
  984. gr_write32(&dev->regs->control, control);
  985. }
/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	/* Enable only the VBUS interrupt at this stage */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}
/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		/* Release the lock while calling out to the gadget driver */
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	/* Re-arm VBUS detection so a new connection is noticed */
	gr_enable_vbus_detect(dev);
}
/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	/* USB reset: back to address 0 and the default state */
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	/* Abort anything outstanding on ep0 and re-arm for new SETUPs */
	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}
  1023. /* ---------------------------------------------------------------------- */
  1024. /* Irq handling */
/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	/* READ_ONCE: the descriptor may be updated under us by DMA */
	if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}
/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	/* READ_ONCE: the descriptor may be updated under us by DMA */
	ctrl = READ_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	/* GR_DESC_OUT_CTRL_SE flags that this was a SETUP packet */
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
		/* Short packet or >= expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}
/*
 * Handle state changes. Returns whether something was handled.
 *
 * Checks the status register for VBUS connect/disconnect, USB reset,
 * speed change and suspend/resume, in that order.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	/* Powered: any gadget state beyond (NOT)ATTACHED */
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		/* Write back the bit to acknowledge the reset condition */
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	/* NOTE(review): GR_STATUS_SU clear is treated as "bus suspended" */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		/* Remember where we came from so resume can restore it */
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			/* Drop the lock while calling the gadget driver */
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			/* Drop the lock while calling the gadget driver */
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}
/*
 * Non-interrupt context irq handler (threaded half of the IRQ).
 *
 * Polls all endpoints for completed transfers and checks for device
 * state changes; the hardware does not tell us which source fired.
 */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
  1206. /* Interrupt context irq handler */
  1207. static irqreturn_t gr_irq(int irq, void *_dev)
  1208. {
  1209. struct gr_udc *dev = _dev;
  1210. if (!dev->irq_enabled)
  1211. return IRQ_NONE;
  1212. return IRQ_WAKE_THREAD;
  1213. }
  1214. /* ---------------------------------------------------------------------- */
  1215. /* USB ep ops */
/*
 * Enable endpoint. Not for ep0in and ep0out that are handled separately.
 *
 * Validates the endpoint descriptor against the hardware's capabilities,
 * then programs the endpoint control and DMA control registers.
 */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	/* Map the USB transfer type to the hardware's mode encoding */
	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
	max = usb_endpoint_maxp(desc);
	nt = usb_endpoint_maxp_mult(desc) - 1;
	buffer_size = GR_BUFFER_SIZE(epctrl);
	if (nt && (mode == 0 || mode == 2)) {
		/* High-bandwidth only applies to isoc/interrupt endpoints */
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		dev_err(dev->dev,
			"Invalid value 0x3 for additional trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	} else if (max > ep->ep.maxpacket_limit) {
		dev_err(dev->dev, "Requested max payload %d > limit %d\n",
			max, ep->ep.maxpacket_limit);
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	/* An endpoint that is not stopped is already in use */
	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed the buffers in the OUT
		 * direction.
		 */
		ep->bytes_per_buffer = max;
	}

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	/* Enable DMA interrupts with automatic descriptor advance */
	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}
  1331. /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
  1332. static int gr_ep_disable(struct usb_ep *_ep)
  1333. {
  1334. struct gr_ep *ep;
  1335. struct gr_udc *dev;
  1336. unsigned long flags;
  1337. ep = container_of(_ep, struct gr_ep, ep);
  1338. if (!_ep || !ep->ep.desc)
  1339. return -ENODEV;
  1340. dev = ep->dev;
  1341. /* 'ep0' IN and OUT are reserved */
  1342. if (ep == &dev->epo[0] || ep == &dev->epi[0])
  1343. return -EINVAL;
  1344. if (dev->ep0state == GR_EP0_SUSPEND)
  1345. return -EBUSY;
  1346. dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
  1347. spin_lock_irqsave(&dev->lock, flags);
  1348. gr_ep_nuke(ep);
  1349. gr_ep_reset(ep);
  1350. ep->ep.desc = NULL;
  1351. spin_unlock_irqrestore(&dev->lock, flags);
  1352. return 0;
  1353. }
  1354. /*
  1355. * Frees a request, but not any DMA buffers associated with it
  1356. * (gr_finish_request should already have taken care of that).
  1357. */
  1358. static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
  1359. {
  1360. struct gr_request *req;
  1361. if (!_ep || !_req)
  1362. return;
  1363. req = container_of(_req, struct gr_request, req);
  1364. /* Leads to memory leak */
  1365. WARN(!list_empty(&req->queue),
  1366. "request not dequeued properly before freeing\n");
  1367. kfree(req);
  1368. }
/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	/* Note: gfp_flags is ignored; GFP_ATOMIC is forced under the lock */
	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}
/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req = NULL, *iter;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
  1445. /* Helper for gr_set_halt and gr_set_wedge */
  1446. static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
  1447. {
  1448. int ret;
  1449. struct gr_ep *ep;
  1450. if (!_ep)
  1451. return -ENODEV;
  1452. ep = container_of(_ep, struct gr_ep, ep);
  1453. spin_lock(&ep->dev->lock);
  1454. /* Halting an IN endpoint should fail if queue is not empty */
  1455. if (halt && ep->is_in && !list_empty(&ep->queue)) {
  1456. ret = -EAGAIN;
  1457. goto out;
  1458. }
  1459. ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
  1460. out:
  1461. spin_unlock(&ep->dev->lock);
  1462. return ret;
  1463. }
  1464. /* Halt endpoint */
  1465. static int gr_set_halt(struct usb_ep *_ep, int halt)
  1466. {
  1467. return gr_set_halt_wedge(_ep, halt, 0);
  1468. }
  1469. /* Halt and wedge endpoint */
  1470. static int gr_set_wedge(struct usb_ep *_ep)
  1471. {
  1472. return gr_set_halt_wedge(_ep, 1, 1);
  1473. }
  1474. /*
  1475. * Return the total number of bytes currently stored in the internal buffers of
  1476. * the endpoint.
  1477. */
  1478. static int gr_fifo_status(struct usb_ep *_ep)
  1479. {
  1480. struct gr_ep *ep;
  1481. u32 epstat;
  1482. u32 bytes = 0;
  1483. if (!_ep)
  1484. return -ENODEV;
  1485. ep = container_of(_ep, struct gr_ep, ep);
  1486. epstat = gr_read32(&ep->regs->epstat);
  1487. if (epstat & GR_EPSTAT_B0)
  1488. bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
  1489. if (epstat & GR_EPSTAT_B1)
  1490. bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
  1491. return bytes;
  1492. }
  1493. /* Empty data from internal buffers of an endpoint. */
  1494. static void gr_fifo_flush(struct usb_ep *_ep)
  1495. {
  1496. struct gr_ep *ep;
  1497. u32 epctrl;
  1498. if (!_ep)
  1499. return;
  1500. ep = container_of(_ep, struct gr_ep, ep);
  1501. dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
  1502. spin_lock(&ep->dev->lock);
  1503. epctrl = gr_read32(&ep->regs->epctrl);
  1504. epctrl |= GR_EPCTRL_CB;
  1505. gr_write32(&ep->regs->epctrl, epctrl);
  1506. spin_unlock(&ep->dev->lock);
  1507. }
/* Endpoint operations handed to the gadget core for every gr_ep */
static const struct usb_ep_ops gr_ep_ops = {
	.enable = gr_ep_enable,
	.disable = gr_ep_disable,

	.alloc_request = gr_alloc_request,
	.free_request = gr_free_request,

	.queue = gr_queue_ext,
	.dequeue = gr_dequeue,

	.set_halt = gr_set_halt,
	.set_wedge = gr_set_wedge,
	.fifo_status = gr_fifo_status,
	.fifo_flush = gr_fifo_flush,
};
  1520. /* ---------------------------------------------------------------------- */
  1521. /* USB Gadget ops */
  1522. static int gr_get_frame(struct usb_gadget *_gadget)
  1523. {
  1524. struct gr_udc *dev;
  1525. if (!_gadget)
  1526. return -ENODEV;
  1527. dev = container_of(_gadget, struct gr_udc, gadget);
  1528. return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
  1529. }
  1530. static int gr_wakeup(struct usb_gadget *_gadget)
  1531. {
  1532. struct gr_udc *dev;
  1533. if (!_gadget)
  1534. return -ENODEV;
  1535. dev = container_of(_gadget, struct gr_udc, gadget);
  1536. /* Remote wakeup feature not enabled by host*/
  1537. if (!dev->remote_wakeup)
  1538. return -EINVAL;
  1539. spin_lock(&dev->lock);
  1540. gr_write32(&dev->regs->control,
  1541. gr_read32(&dev->regs->control) | GR_CONTROL_RW);
  1542. spin_unlock(&dev->lock);
  1543. return 0;
  1544. }
  1545. static int gr_pullup(struct usb_gadget *_gadget, int is_on)
  1546. {
  1547. struct gr_udc *dev;
  1548. u32 control;
  1549. if (!_gadget)
  1550. return -ENODEV;
  1551. dev = container_of(_gadget, struct gr_udc, gadget);
  1552. spin_lock(&dev->lock);
  1553. control = gr_read32(&dev->regs->control);
  1554. if (is_on)
  1555. control |= GR_CONTROL_EP;
  1556. else
  1557. control &= ~GR_CONTROL_EP;
  1558. gr_write32(&dev->regs->control, control);
  1559. spin_unlock(&dev->lock);
  1560. return 0;
  1561. }
  1562. static int gr_udc_start(struct usb_gadget *gadget,
  1563. struct usb_gadget_driver *driver)
  1564. {
  1565. struct gr_udc *dev = to_gr_udc(gadget);
  1566. spin_lock(&dev->lock);
  1567. /* Hook up the driver */
  1568. dev->driver = driver;
  1569. /* Get ready for host detection */
  1570. gr_enable_vbus_detect(dev);
  1571. spin_unlock(&dev->lock);
  1572. return 0;
  1573. }
  1574. static int gr_udc_stop(struct usb_gadget *gadget)
  1575. {
  1576. struct gr_udc *dev = to_gr_udc(gadget);
  1577. unsigned long flags;
  1578. spin_lock_irqsave(&dev->lock, flags);
  1579. dev->driver = NULL;
  1580. gr_stop_activity(dev);
  1581. spin_unlock_irqrestore(&dev->lock, flags);
  1582. return 0;
  1583. }
/* Gadget-level operations registered with the UDC core */
static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup         = gr_wakeup,
	.pullup         = gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};
  1592. /* ---------------------------------------------------------------------- */
  1593. /* Module probe, removal and of-matching */
/* Static endpoint names, indexed by endpoint number (OUT direction) */
static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

/* Static endpoint names, indexed by endpoint number (IN direction) */
static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};
/*
 * Initialize one endpoint (IN or OUT) at driver setup time: reset it,
 * register it with the gadget core, and allocate its DMA buffers.
 * For ep0 a control request and bounce buffer are preallocated as well.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	/* Select the IN or OUT endpoint instance and its register bank */
	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		/*
		 * Preallocate a request and a DMA-capable buffer for ep0
		 * control transfers; the requests are released in
		 * gr_remove(), the buffer by devm on driver detach.
		 */
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		if (!_req)
			return -ENOMEM;

		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!buf) {
			gr_free_request(&ep->ep, _req);
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		/* Non-control endpoints are exposed for gadget ep matching */
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	/*
	 * Coherent per-endpoint buffer, one max-packet in size.
	 * NOTE(review): presumably a bounce buffer for request tails that
	 * don't fill a hardware buffer — its users are outside this chunk.
	 */
	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}
/*
 * One-time controller initialization: set address 0, initialize every
 * endpoint, and enable ep0 in both directions.
 *
 * Must be called with dev->lock held.
 */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);

	/*
	 * Initialize all OUT endpoints, then all IN endpoints. Per-endpoint
	 * buffer sizes may come from the devicetree properties
	 * "epobufsizes"/"epibufsizes"; missing entries default to 1024.
	 */
	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}
	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Remote wakeup must be disabled by default (per USB spec) */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}
  1703. static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
  1704. {
  1705. struct gr_ep *ep;
  1706. if (is_in)
  1707. ep = &dev->epi[num];
  1708. else
  1709. ep = &dev->epo[num];
  1710. if (ep->tailbuf)
  1711. dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
  1712. ep->tailbuf, ep->tailbuf_paddr);
  1713. }
/*
 * Tear down everything gr_probe() set up. Also invoked by gr_probe()
 * itself on failure, so it must tolerate partially-initialized state.
 */
static void gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver) {
		/*
		 * A gadget driver is still bound after udc deletion; bail
		 * out and deliberately leave the remaining resources
		 * allocated rather than freeing them out from under it.
		 */
		dev_err(&pdev->dev,
			"Driver still in use but removing anyhow\n");
		return;
	}

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	/* Release the preallocated ep0 requests and per-ep tail buffers */
	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);
}
  1735. static int gr_request_irq(struct gr_udc *dev, int irq)
  1736. {
  1737. return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
  1738. IRQF_SHARED, driver_name, dev);
  1739. }
/*
 * Probe: map registers, read the hardware configuration, register the
 * gadget udc, initialize the controller and request its irq(s).
 * On any failure after resource acquisition begins, gr_remove() is used
 * for cleanup via the shared "out" path.
 */
static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0)
		return dev->irq;

	/* Some core configurations has separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		/* Separate IN irq implies a separate OUT irq too */
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo < 0)
			return dev->irqo;
	} else {
		dev->irqi = 0; /* Single combined irq */
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): the gadget udc is registered BEFORE dev->lock is
	 * taken below, so in principle a gadget driver could bind before
	 * gr_udc_init() has run — confirm this window is acceptable.
	 */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	spin_lock(&dev->lock);

	retval = gr_udc_init(dev);
	if (retval) {
		spin_unlock(&dev->lock);
		goto out;
	}

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	spin_unlock(&dev->lock);

	gr_dfs_create(dev);

	/* Request the combined irq first, then the optional IN/OUT pair */
	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	/* gr_remove() handles partial initialization (checks dev->added) */
	if (retval)
		gr_remove(pdev);

	return retval;
}
/*
 * OF match table. Matched by node name rather than "compatible";
 * NOTE(review): "01_021" is presumably the raw GRLIB vendor/device id
 * string — confirm against the GRLIB plug&play documentation.
 */
static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);
/* Platform driver glue; probe/remove registered with the platform bus */
static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove_new = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");