// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <pawell@cadence.com>
 *
 */

#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/dmi.h>

#include "core.h"
#include "gadget-export.h"
#include "drd.h"
#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"

unsigned int cdnsp_port_speed(unsigned int port_status)
{
	/* Detect gadget speed based on PORTSC register. */
	if (DEV_SUPERSPEEDPLUS(port_status))
		return USB_SPEED_SUPER_PLUS;
	else if (DEV_SUPERSPEED(port_status))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(port_status))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(port_status))
		return USB_SPEED_FULL;

	/* If device is detached then speed will be USB_SPEED_UNKNOWN. */
	return USB_SPEED_UNKNOWN;
}

/*
 * Given a port state, this function returns a value that would result in the
 * port being in the same state, if the value was written to the port status
 * control register.
 * Save Read Only (RO) bits and save read/write bits where
 * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
 */
u32 cdnsp_port_state_to_neutral(u32 state)
{
	/* Save read-only status and port state. */
	return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
}

/**
 * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
 *                           with capability ID id.
 * @base: PCI MMIO registers base address.
 * @start: Address at which to start looking, (0 or HCC_PARAMS to start at
 *         beginning of list)
 * @id: Extended capability ID to search for.
 *
 * Returns the offset of the next matching extended capability structure.
 * Some capabilities can occur several times,
 * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
 */
int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
{
	u32 offset = start;
	u32 next;
	u32 val;

	if (!start || start == HCC_PARAMS_OFFSET) {
		val = readl(base + HCC_PARAMS_OFFSET);
		if (val == ~0)
			return 0;

		offset = HCC_EXT_CAPS(val) << 2;
		if (!offset)
			return 0;
	}

	do {
		val = readl(base + offset);
		if (val == ~0)
			return 0;

		if (EXT_CAPS_ID(val) == id && offset != start)
			return offset;

		next = EXT_CAPS_NEXT(val);
		offset += next << 2;
	} while (next);

	return 0;
}
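
/*
 * Write the requested link state to PORTSC: wake-on-connect and
 * wake-on-disconnect are (re)enabled first, then PLS is updated with the
 * Link Strobe bit set so the controller latches the new state.
 */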
void cdnsp_set_link_state(struct cdnsp_device *pdev,
			  __le32 __iomem *port_regs,
			  u32 link_state)
{
	int port_num = 0xFF;
	u32 temp;

	temp = readl(port_regs);
	temp = cdnsp_port_state_to_neutral(temp);
	temp |= PORT_WKCONN_E | PORT_WKDISC_E;
	writel(temp, port_regs);

	temp &= ~PORT_PLS_MASK;
	temp |= PORT_LINK_STROBE | link_state;

	if (pdev->active_port)
		port_num = pdev->active_port->port_num;

	trace_cdnsp_handle_port_status(port_num, readl(port_regs));
	writel(temp, port_regs);
	trace_cdnsp_link_state_changed(port_num, readl(port_regs));
}

static void cdnsp_disable_port(struct cdnsp_device *pdev,
			       __le32 __iomem *port_regs)
{
	u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));

	writel(temp | PORT_PED, port_regs);
}

static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
					__le32 __iomem *port_regs)
{
	u32 portsc = readl(port_regs);

	writel(cdnsp_port_state_to_neutral(portsc) |
	       (portsc & PORT_CHANGE_BITS), port_regs);
}
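
/*
 * "Chicken bits" are vendor-specific workaround/tuning bits that live in the
 * D_XEC_PRE_REGS_CAP extended capability block; the two helpers below set or
 * clear bits in its CHICKEN_BITS_2 register.
 */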
static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
	__le32 __iomem *reg;
	void __iomem *base;
	u32 offset = 0;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;

	bit = readl(reg) | bit;
	writel(bit, reg);
}

static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
	__le32 __iomem *reg;
	void __iomem *base;
	u32 offset = 0;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;

	bit = readl(reg) & ~bit;
	writel(bit, reg);
}

/*
 * Disable interrupts and begin the controller halting process.
 */
static void cdnsp_quiesce(struct cdnsp_device *pdev)
{
	u32 halted;
	u32 mask;
	u32 cmd;

	mask = ~(u32)(CDNSP_IRQS);

	halted = readl(&pdev->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~(CMD_R_S | CMD_DEVEN);

	cmd = readl(&pdev->op_regs->command);
	cmd &= mask;
	writel(cmd, &pdev->op_regs->command);
}

/*
 * Force controller into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * Controller will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read controller Halted bit in the status register to see when the
 * controller is finished.
 */
int cdnsp_halt(struct cdnsp_device *pdev)
{
	int ret;
	u32 val;

	cdnsp_quiesce(pdev);

	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
					val & STS_HALT, 1,
					CDNSP_MAX_HALT_USEC);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Device halt failed\n");
		return ret;
	}

	pdev->cdnsp_state |= CDNSP_STATE_HALTED;

	return 0;
}

/*
 * The device controller died: register reads return 0xffffffff, or a command
 * never completes.
 */
void cdnsp_died(struct cdnsp_device *pdev)
{
	dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
	pdev->cdnsp_state |= CDNSP_STATE_DYING;
	cdnsp_halt(pdev);
}

/*
 * Set the run bit and wait for the device to be running.
 */
static int cdnsp_start(struct cdnsp_device *pdev)
{
	u32 temp;
	int ret;

	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_R_S | CMD_DEVEN);
	writel(temp, &pdev->op_regs->command);

	pdev->cdnsp_state = 0;

	/*
	 * Wait for the STS_HALT Status bit to be 0 to indicate the device is
	 * running.
	 */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
					!(temp & STS_HALT), 1,
					CDNSP_MAX_HALT_USEC);
	if (ret) {
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		dev_err(pdev->dev, "ERROR: Controller run failed\n");
	}

	return ret;
}

/*
 * Reset a halted controller.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int cdnsp_reset(struct cdnsp_device *pdev)
{
	u32 command;
	u32 temp;
	int ret;

	temp = readl(&pdev->op_regs->status);

	if (temp == ~(u32)0) {
		dev_err(pdev->dev, "Device not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((temp & STS_HALT) == 0) {
		dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
		return -EINVAL;
	}

	command = readl(&pdev->op_regs->command);
	command |= CMD_RESET;
	writel(command, &pdev->op_regs->command);

	ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
					!(temp & CMD_RESET), 1,
					10 * 1000);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Controller reset failed\n");
		return ret;
	}

	/*
	 * CDNSP cannot write any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
					!(temp & STS_CNR), 1,
					10 * 1000);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
		return ret;
	}

	dev_dbg(pdev->dev, "Controller ready to work");

	return ret;
}

/*
 * cdnsp_get_endpoint_index - Find the index for an endpoint given its
 * descriptor. Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
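/* For example: ep1out -> index 1, ep1in -> index 2, ep2in -> index 4. */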
static unsigned int
cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index = (unsigned int)usb_endpoint_num(desc);

	if (usb_endpoint_xfer_control(desc))
		return index * 2;

	return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}

/*
 * Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int
cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
{
	return 1 << (cdnsp_get_endpoint_index(desc) + 1);
}
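
/*
 * Map the request for DMA, add it to the endpoint's pending list and queue
 * TRBs appropriate for the transfer type. On ep0 only one request may be
 * pending at a time. Called with the device lock held.
 */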
int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	struct usb_request *request;
	int ret;

	if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
		trace_cdnsp_request_enqueue_busy(preq);
		return -EBUSY;
	}

	request = &preq->request;
	request->actual = 0;
	request->status = -EINPROGRESS;
	preq->direction = pep->direction;
	preq->epnum = pep->number;
	preq->td.drbl = 0;

	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
	if (ret) {
		trace_cdnsp_request_enqueue_error(preq);
		return ret;
	}

	list_add_tail(&preq->list, &pep->pending_list);

	trace_cdnsp_request_enqueue(preq);

	switch (usb_endpoint_type(pep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = cdnsp_queue_ctrl_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		ret = cdnsp_queue_bulk_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = cdnsp_queue_isoc_tx(pdev, preq);
	}

	if (ret)
		goto unmap;

	return 0;

unmap:
	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					pep->direction);
	list_del(&preq->list);
	trace_cdnsp_request_enqueue_error(preq);

	return ret;
}

/*
 * Remove the request's TD from the endpoint ring. This may cause the
 * controller to stop USB transfers, potentially stopping in the middle of a
 * TRB buffer. The controller should pick up where it left off in the TD,
 * unless a Set Transfer Ring Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled request will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, the following cases must be handled:
 *
 * 1) If the controller is in the middle of processing the request to be
 *    canceled, we simply move the ring's dequeue pointer past those TRBs
 *    using the Set Transfer Ring Dequeue Pointer command. This will be
 *    the common case, when drivers time out on the last submitted request
 *    and attempt to cancel.
 *
 * 2) If the controller is in the middle of a different TD, we turn the TRBs
 *    into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
 *    The controller will need to invalidate any TRBs it has cached after
 *    the stop endpoint command.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
 */
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	int ret_stop = 0;
	int ret_rem;

	trace_cdnsp_request_dequeue(preq);

	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
		ret_stop = cdnsp_cmd_stop_ep(pdev, pep);

	ret_rem = cdnsp_remove_request(pdev, preq, pep);

	return ret_rem ? ret_rem : ret_stop;
}

static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	struct cdnsp_ep_ctx *ep_ctx;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/*
	 * When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);

	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
		ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

/* Issue a configure endpoint command and wait for it to finish. */
static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret) {
		dev_err(pdev->dev,
			"ERR: unexpected command completion code 0x%x.\n", ret);
		return -EINVAL;
	}

	return ret;
}
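
/*
 * Scan the not-yet-consumed portion of the event ring and flag any transfer
 * events belonging to this endpoint as invalidated, so stale events are
 * ignored after the endpoint is stopped or disabled.
 */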
static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
				       struct cdnsp_ep *pep)
{
	struct cdnsp_segment *segment;
	union cdnsp_trb *event;
	u32 cycle_state;
	u32 data;

	event = pdev->event_ring->dequeue;
	segment = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	while (1) {
		data = le32_to_cpu(event->trans_event.flags);

		/* Check the owner of the TRB. */
		if ((data & TRB_CYCLE) != cycle_state)
			break;

		if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
		    TRB_TO_EP_ID(data) == (pep->idx + 1)) {
			data |= TRB_EVENT_INVALIDATE;
			event->trans_event.flags = cpu_to_le32(data);
		}

		if (cdnsp_last_trb_on_seg(segment, event)) {
			cycle_state ^= 1;
			segment = pdev->event_ring->deq_seg->next;
			event = segment->trbs;
		} else {
			event++;
		}
	}
}
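
/*
 * Wait until the controller reports the command ring as idle, then walk the
 * event ring looking for the completion event of the most recently queued
 * command TRB. Returns 0 on COMP_SUCCESS, a negative value otherwise.
 */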
int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *cmd_trb;
	dma_addr_t cmd_deq_dma;
	union cdnsp_trb *event;
	u32 cycle_state;
	int ret, val;
	u64 cmd_dma;
	u32 flags;

	cmd_trb = pdev->cmd.command_trb;
	pdev->cmd.status = 0;

	trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);

	ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
					!CMD_RING_BUSY(val), 1,
					CDNSP_CMD_TIMEOUT);
	if (ret) {
		dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
		trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		return -ETIMEDOUT;
	}

	event = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
	if (!cmd_deq_dma)
		return -EINVAL;

	while (1) {
		flags = le32_to_cpu(event->event_cmd.flags);

		/* Check the owner of the TRB. */
		if ((flags & TRB_CYCLE) != cycle_state)
			return -EINVAL;

		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);

		/*
		 * Check whether the completion event is for last queued
		 * command.
		 */
		if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
		    cmd_dma != (u64)cmd_deq_dma) {
			if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
				event++;
				continue;
			}

			if (cdnsp_last_trb_on_ring(pdev->event_ring,
						   event_deq_seg, event))
				cycle_state ^= 1;

			event_deq_seg = event_deq_seg->next;
			event = event_deq_seg->trbs;
			continue;
		}

		trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);

		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
		if (pdev->cmd.status == COMP_SUCCESS)
			return 0;

		return -pdev->cmd.status;
	}
}
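
/*
 * Set (value != 0) or clear the halt/stall condition on an endpoint. Setting
 * issues a Halt Endpoint command once the endpoint is stopped; clearing
 * issues a Reset Endpoint command and rings the doorbell for any rings that
 * still have queued TRBs.
 */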
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
			struct cdnsp_ep *pep,
			int value)
{
	int ret;

	trace_cdnsp_ep_halt(value ? "Set" : "Clear");

	ret = cdnsp_cmd_stop_ep(pdev, pep);
	if (ret)
		return ret;

	if (value) {
		if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
			cdnsp_queue_halt_endpoint(pdev, pep->idx);
			cdnsp_ring_cmd_db(pdev);
			ret = cdnsp_wait_for_cmd_compl(pdev);
		}

		pep->ep_state |= EP_HALTED;
	} else {
		cdnsp_queue_reset_ep(pdev, pep->idx);
		cdnsp_ring_cmd_db(pdev);
		ret = cdnsp_wait_for_cmd_compl(pdev);

		trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);

		if (ret)
			return ret;

		pep->ep_state &= ~EP_HALTED;

		if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
			cdnsp_ring_doorbell_for_active_rings(pdev, pep);

		pep->ep_state &= ~EP_WEDGE;
	}

	return 0;
}
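
/*
 * Recompute the slot's Context Entries field from the input context add/drop
 * flags and issue a Configure Endpoint command when the flags describe a
 * real state change for the endpoint.
 */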
static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int ret = 0;
	u32 ep_sts;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
		return 0;

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
		    (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}

	ep_sts = GET_EP_CTX_STATE(pep->out_ctx);

	if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
	     ep_sts == EP_STATE_DISABLED) ||
	    (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
		ret = cdnsp_configure_endpoint(pdev);

	trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
	trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);

	cdnsp_zero_in_ctx(pdev);

	return ret;
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint. The USB core should come back and call
 * cdnsp_setup_device(), and then re-set up the configuration.
 */
int cdnsp_reset_device(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret, i;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info = 0;
	pdev->device_address = 0;

	/* If device is not setup, there is no point in resetting it. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
	trace_cdnsp_reset_device(slot_ctx);

	if (slot_state <= SLOT_STATE_DEFAULT &&
	    pdev->eps[0].ep_state & EP_HALTED) {
		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
	}

	/*
	 * During the Reset Device command, the controller shall transition
	 * endpoint ep0 to the Running state.
	 */
	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
	pdev->eps[0].ep_state |= EP_ENABLED;

	if (slot_state <= SLOT_STATE_DEFAULT)
		return 0;

	cdnsp_queue_reset_device(pdev);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	/*
	 * After the Reset Device command, all non-default endpoints are in
	 * the Disabled state.
	 */
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
		pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;

	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);

	if (ret)
		dev_err(pdev->dev, "Reset device failed with error code %d",
			ret);

	return ret;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
					     struct cdnsp_ep_ctx *ep_ctx,
					     struct cdnsp_stream_info *stream_info)
{
	u32 max_primary_streams;

	/*
	 * MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

/*
 * The drivers use this function to prepare a bulk endpoint to use streams.
 *
 * Don't allow the call to succeed if the endpoint only supports one stream
 * (which means it doesn't support streams at all).
 */
int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
	unsigned int num_stream_ctxs;
	int ret;

	if (num_streams == 0)
		return 0;

	if (num_streams > STREAM_NUM_STREAMS)
		return -EINVAL;

	/*
	 * Add two to the number of streams requested to account for
	 * stream 0 that is reserved for controller usage and one additional
	 * for TASK SET FULL response.
	 */
	num_streams += 2;

	/* The stream context array size must be a power of two. */
	num_stream_ctxs = roundup_pow_of_two(num_streams);

	trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);

	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
	if (ret)
		return ret;

	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);

	pep->ep_state |= EP_HAS_STREAMS;
	pep->stream_info.td_count = 0;
	pep->stream_info.first_prime_det = 0;

	/* Subtract 1 for stream 0, which drivers can't use. */
	return num_streams - 1;
}
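
/*
 * Issue a Disable Slot command, then reset the driver's slot bookkeeping and
 * zero both device contexts so no stale state survives re-enumeration.
 */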
int cdnsp_disable_slot(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	pdev->slot_id = 0;
	pdev->active_port = NULL;

	trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);

	return ret;
}

int cdnsp_enable_slot(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret;

	/* If the slot is already enabled, there is nothing to do. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	if (slot_state != SLOT_STATE_DISABLED)
		return 0;

	cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret)
		goto show_trace;

	pdev->slot_id = 1;

show_trace:
	trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	return ret;
}

/*
 * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY
 * or with BSR=1 if setup is SETUP_CONTEXT_ADDRESS.
 */
int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int dev_state = 0;
	int ret;

	if (!pdev->slot_id) {
		trace_cdnsp_slot_id("incorrect");
		return -EINVAL;
	}

	if (!pdev->active_port->port_num)
		return -EINVAL;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
		trace_cdnsp_slot_already_in_default(slot_ctx);
		return 0;
	}

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
		ret = cdnsp_setup_addressable_priv_dev(pdev);
		if (ret)
			return ret;
	}

	cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);

	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_cdnsp_setup_device_slot(slot_ctx);

	cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));

	/* Zero the input context control for later use. */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	return ret;
}

void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
				 struct usb_request *req,
				 int enable)
{
	if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
		return;

	trace_cdnsp_lpm(enable);

	if (enable)
		writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
		       &pdev->active_port->regs->portpmsc);
	else
		writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
}
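
/* The microframe index counts 125 us intervals; >> 3 converts to 1 ms frames. */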
static int cdnsp_get_frame(struct cdnsp_device *pdev)
{
	return readl(&pdev->run_regs->microframe_index) >> 3;
}

static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 added_ctxs;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    !desc->wMaxPacketSize)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;
	pep->ep_state &= ~EP_UNCONFIGURED;

	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
			  "%s is already enabled\n", pep->name))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);

	added_ctxs = cdnsp_get_endpoint_flag(desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
		ret = -EINVAL;
		goto unlock;
	}

	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	if (pdev->gadget.speed == USB_SPEED_FULL) {
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
			pep->interval = desc->bInterval << 3;
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
			pep->interval = BIT(desc->bInterval - 1) << 3;
	}

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
		if (pep->interval > BIT(12)) {
			dev_err(pdev->dev, "bInterval %d not supported\n",
				desc->bInterval);
			ret = -EINVAL;
			goto unlock;
		}
		cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	}

	ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
	if (ret)
		goto unlock;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
	ctrl_ctx->drop_flags = 0;

	ret = cdnsp_update_eps_configuration(pdev, pep);
	if (ret) {
		cdnsp_free_endpoint_rings(pdev, pep);
		goto unlock;
	}

	pep->ep_state |= EP_ENABLED;
	pep->ep_state &= ~EP_STOPPED;

unlock:
	trace_cdnsp_ep_enable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 drop_flag;
	int ret = 0;

	if (!ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	spin_lock_irqsave(&pdev->lock, flags);

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s is already disabled\n", pep->name);
		ret = -EINVAL;
		goto finish;
	}

	pep->ep_state |= EP_DIS_IN_RROGRESS;

	/* Endpoint was unconfigured by Reset Device command. */
	if (!(pep->ep_state & EP_UNCONFIGURED))
		cdnsp_cmd_stop_ep(pdev, pep);

	/* Remove all queued USB requests. */
	while (!list_empty(&pep->pending_list)) {
		preq = next_request(&pep->pending_list);
		cdnsp_ep_dequeue(pep, preq);
	}

	cdnsp_invalidate_ep_events(pdev, pep);

	pep->ep_state &= ~EP_DIS_IN_RROGRESS;
	drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
	ctrl_ctx->add_flags = 0;

	cdnsp_endpoint_zero(pdev, pep);

	if (!(pep->ep_state & EP_UNCONFIGURED))
		ret = cdnsp_update_eps_configuration(pdev, pep);

	cdnsp_free_endpoint_rings(pdev, pep);

	pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
	pep->ep_state |= EP_STOPPED;

finish:
	trace_cdnsp_ep_disable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
							 gfp_t gfp_flags)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_request *preq;

	preq = kzalloc(sizeof(*preq), gfp_flags);
	if (!preq)
		return NULL;

	preq->epnum = pep->number;
	preq->pep = pep;

	trace_cdnsp_alloc_request(preq);

	return &preq->request;
}

static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
					 struct usb_request *request)
{
	struct cdnsp_request *preq = to_cdnsp_request(request);

	trace_cdnsp_free_request(preq);
	kfree(preq);
}

static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
				 struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
			pep->name);
		return -EINVAL;
	}

	preq = to_cdnsp_request(request);
	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_enqueue(pep, preq);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
				   struct usb_request *request)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags;
	int ret;

	if (request->status != -EINPROGRESS)
		return 0;

	if (!pep->endpoint.desc) {
		dev_err(pdev->dev,
			"%s: can't dequeue to disabled endpoint\n",
			pep->name);
		return -ESHUTDOWN;
	}

	/* Requests have already been dequeued while disabling the endpoint. */
	if (!(pep->ep_state & EP_ENABLED))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	struct cdnsp_request *preq;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);

	preq = next_request(&pep->pending_list);
	if (value) {
		if (preq) {
			trace_cdnsp_ep_busy_try_halt_again(pep, 0);
			ret = -EAGAIN;
			goto done;
		}
	}

	ret = cdnsp_halt_endpoint(pdev, pep, value);

done:
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pep->ep_state |= EP_WEDGE;
	ret = cdnsp_halt_endpoint(pdev, pep, 1);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};

static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};

void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
			   struct cdnsp_request *preq,
			   int status)
{
	struct cdnsp_device *pdev = pep->pdev;

	list_del(&preq->list);

	if (preq->request.status == -EINPROGRESS)
		preq->request.status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					preq->direction);

	trace_cdnsp_request_giveback(preq);

	if (preq != &pdev->ep0_preq) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, &preq->request);
		spin_lock(&pdev->lock);
	}
}

static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
};
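
/*
 * Program interrupt moderation, configure the maximum supported speed on the
 * 3.x port (or force full speed on the 2.0 port), bring the ports into link
 * detection and start the controller with interrupts enabled.
 */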
static int cdnsp_run(struct cdnsp_device *pdev,
		     enum usb_device_speed speed)
{
	u32 fs_speed = 0;
	u32 temp;
	int ret;

	temp = readl(&pdev->ir_set->irq_control);
	temp &= ~IMOD_INTERVAL_MASK;
	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
	writel(temp, &pdev->ir_set->irq_control);

	temp = readl(&pdev->port3x_regs->mode_addr);

	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		temp |= CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_SUPER:
		temp &= ~CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_FULL:
		fs_speed = PORT_REG6_FORCE_FS;
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to superspeed. */
		speed = USB_SPEED_SUPER;
		break;
	}

	if (speed >= USB_SPEED_SUPER) {
		writel(temp, &pdev->port3x_regs->mode_addr);
		cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
				     XDEV_RXDETECT);
	} else {
		cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	}

	cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
			     XDEV_RXDETECT);

	cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);

	ret = cdnsp_start(pdev);
	if (ret) {
		ret = -ENODEV;
		goto err;
	}

	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_INTE);
	writel(temp, &pdev->op_regs->command);

	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);

	trace_cdnsp_init("Controller ready to work");
	return 0;
err:
	cdnsp_halt(pdev);
	return ret;
}

static int cdnsp_gadget_udc_start(struct usb_gadget *g,
				  struct usb_gadget_driver *driver)
{
	enum usb_device_speed max_speed = driver->max_speed;
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget_driver = driver;

	/* limit speed if necessary */
	max_speed = min(driver->max_speed, g->max_speed);
	ret = cdnsp_run(pdev, max_speed);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Update Event Ring Dequeue Pointer:
 * - When all events have finished
 * - To avoid "Event Ring Full Error" condition
 */
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
			       union cdnsp_trb *event_ring_deq,
			       u8 clear_ehb)
{
	u64 temp_64;
	dma_addr_t deq;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);

	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != pdev->event_ring->dequeue) {
		deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
					    pdev->event_ring->dequeue);
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C). */
	if (clear_ehb)
		temp_64 |= ERST_EHB;
	else
		temp_64 &= ~ERST_EHB;

	cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}

static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *seg;
	u64 val_64;
	int i;

	cdnsp_initialize_ring_info(pdev->cmd_ring);

	seg = pdev->cmd_ring->first_seg;
	for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
		memset(seg->trbs, 0,
		       sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
		seg = seg->next;
	}

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}

static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *event_ring_deq;
	union cdnsp_trb *event;
	u32 cycle_bit;

	event_ring_deq = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	event = pdev->event_ring->dequeue;

	/* Update ring dequeue pointer. */
	while (1) {
		cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);

		/* Does the controller or driver own the TRB? */
		if (cycle_bit != pdev->event_ring->cycle_state)
			break;

		cdnsp_inc_deq(pdev, pdev->event_ring);

		if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
			event++;
			continue;
		}

		if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
					   event))
			cycle_bit ^= 1;

		event_deq_seg = event_deq_seg->next;
		event = event_deq_seg->trbs;
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
}
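
/*
 * Tear down the data path: dequeue the internally queued ep0 request, disable
 * both ports and the slot, halt the controller, then acknowledge and drain
 * all pending interrupts and events before clearing the command ring.
 */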
static void cdnsp_stop(struct cdnsp_device *pdev)
{
	u32 temp;

	/* Remove internally queued request for ep0. */
	if (!list_empty(&pdev->eps[0].pending_list)) {
		struct cdnsp_request *req;

		req = next_request(&pdev->eps[0].pending_list);
		if (req == &pdev->ep0_preq)
			cdnsp_ep_dequeue(&pdev->eps[0], req);
	}

	cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	cdnsp_disable_slot(pdev);
	cdnsp_halt(pdev);

	temp = readl(&pdev->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);

	cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);

	/* Clear interrupt line */
	temp = readl(&pdev->ir_set->irq_pending);
	temp |= IMAN_IP;
	writel(temp, &pdev->ir_set->irq_pending);

	cdnsp_consume_all_events(pdev);
	cdnsp_clear_cmd_ring(pdev);

	trace_cdnsp_exit("Controller stopped.");
}

/*
 * Stop controller.
 * This function is called by the gadget core when the driver is removed.
 * Disable slot, disable IRQs, and quiesce the controller.
 */
static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	cdnsp_stop(pdev);
	pdev->gadget_driver = NULL;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_get_frame(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);

	return cdnsp_get_frame(pdev);
}

static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;
	u32 portpm, portsc;

	port_regs = pdev->active_port->regs;
	portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;

	/* Remote wakeup feature is not enabled by host. */
	if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
		portpm = readl(&port_regs->portpmsc);

		if (!(portpm & PORT_RWE))
			return;
	}

	if (portsc == XDEV_U3 && !pdev->may_wakeup)
		return;

	cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);

	pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
}

static int cdnsp_gadget_wakeup(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	__cdnsp_gadget_wakeup(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
					int is_selfpowered)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
	struct cdns *cdns = dev_get_drvdata(pdev->dev);
	unsigned long flags;

	trace_cdnsp_pullup(is_on);

	/*
	 * Disable events handling while controller is being
	 * enabled/disabled.
	 */
	disable_irq(cdns->dev_irq);
	spin_lock_irqsave(&pdev->lock, flags);

	if (!is_on) {
		cdnsp_reset_device(pdev);
		cdns_clear_vbus(cdns);
	} else {
		cdns_set_vbus(cdns);
	}

	spin_unlock_irqrestore(&pdev->lock, flags);
	enable_irq(cdns->dev_irq);

	return 0;
}

static const struct usb_gadget_ops cdnsp_gadget_ops = {
	.get_frame		= cdnsp_gadget_get_frame,
	.wakeup			= cdnsp_gadget_wakeup,
	.set_selfpowered	= cdnsp_gadget_set_selfpowered,
	.pullup			= cdnsp_gadget_pullup,
	.udc_start		= cdnsp_gadget_udc_start,
	.udc_stop		= cdnsp_gadget_udc_stop,
};
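
/*
 * Read the endpoint's internal buffering parameters from the XBUF extended
 * capability. RX (OUT) values come straight from the tag-mask registers; for
 * TX (IN) the per-endpoint tag-mask registers follow the TX command
 * registers, two 32-bit words per endpoint. The raw register values are
 * converted with (val + 1) / 2 to get the buffer count.
 */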
static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;
	int endpoints;

	reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);

	if (!pep->direction) {
		pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
		pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
		pep->buffering = (pep->buffering + 1) / 2;
		pep->buffering_period = (pep->buffering_period + 1) / 2;
		return;
	}

	endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;

	/* Set to XBUF_TX_TAG_MASK_0 register. */
	reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);

	/* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */
	reg += pep->number * sizeof(u32) * 2;
	pep->buffering = (readl(reg) + 1) / 2;
	pep->buffering_period = pep->buffering;
}
static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
	int max_streams = HCC_MAX_PSA(pdev->hcc_params);
	struct cdnsp_ep *pep;
	int i;

	INIT_LIST_HEAD(&pdev->gadget.ep_list);

	if (max_streams < STREAM_LOG_STREAMS) {
		dev_err(pdev->dev, "Stream size %d not supported\n",
			max_streams);
		return -EINVAL;
	}

	max_streams = STREAM_LOG_STREAMS;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->number = epnum;
		pep->direction = direction; /* 0 for OUT, 1 for IN. */

		/*
		 * Ep0 is bidirectional, so ep0in and ep0out are represented by
		 * pdev->eps[0].
		 */
		if (epnum == 0) {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			pep->idx = 0;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
			pep->endpoint.maxburst = 1;
			pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
			pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
			pep->endpoint.comp_desc = NULL;
			pep->endpoint.caps.type_control = true;
			pep->endpoint.caps.dir_in = true;
			pep->endpoint.caps.dir_out = true;

			pdev->ep0_preq.epnum = pep->number;
			pdev->ep0_preq.pep = pep;
			pdev->gadget.ep0 = &pep->endpoint;
		} else {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, (pep->direction) ? "in" : "out");

			pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

			pep->endpoint.max_streams = max_streams;
			pep->endpoint.ops = &cdnsp_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list,
				      &pdev->gadget.ep_list);

			pep->endpoint.caps.type_iso = true;
			pep->endpoint.caps.type_bulk = true;
			pep->endpoint.caps.type_int = true;

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;
		}

		pep->endpoint.name = pep->name;
		pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
		pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
		cdnsp_get_ep_buffering(pdev, pep);

		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
			"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
			"SupDir IN: %s, OUT: %s\n",
			pep->name, pep->endpoint.maxpacket_limit,
			(pep->endpoint.caps.type_control) ? "yes" : "no",
			(pep->endpoint.caps.type_int) ? "yes" : "no",
			(pep->endpoint.caps.type_bulk) ? "yes" : "no",
			(pep->endpoint.caps.type_iso) ? "yes" : "no",
			(pep->endpoint.caps.dir_in) ? "yes" : "no",
			(pep->endpoint.caps.dir_out) ? "yes" : "no");

		INIT_LIST_HEAD(&pep->pending_list);
	}

	return 0;
}
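
/* Unlink every initialized non-control endpoint from the gadget ep_list. */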
static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
{
	struct cdnsp_ep *pep;
	int i;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];
		if (pep->number != 0 && pep->out_ctx)
			list_del(&pep->endpoint.ep_list);
	}
}
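
/*
 * Notify the gadget driver of a disconnect. The device lock is dropped
 * around the callback since the gadget driver may call back into
 * operations that take pdev->lock.
 */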
void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
{
	pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;

	if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->disconnect(&pdev->gadget);
		spin_lock(&pdev->lock);
	}

	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);

	pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
}
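
/*
 * The two helpers below forward bus suspend/resume events to the gadget
 * driver, again dropping pdev->lock around the callback.
 */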
void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->suspend(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}

void cdnsp_resume_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->resume) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->resume(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}
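
/*
 * Handle a USB bus reset: re-read the negotiated port speed, size ep0's
 * max packet accordingly and move the gadget to the Default state.
 */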
void cdnsp_irq_reset(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;

	cdnsp_reset_device(pdev);

	port_regs = pdev->active_port->regs;
	pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));

	spin_unlock(&pdev->lock);
	usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
	spin_lock(&pdev->lock);

	switch (pdev->gadget.speed) {
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		pdev->gadget.ep0->maxpacket = 512;
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		pdev->gadget.ep0->maxpacket = 64;
		break;
	default:
		/* Low speed is not supported. */
		dev_err(pdev->dev, "Unknown device speed\n");
		break;
	}

	cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
	usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}
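
/* Locate the RTL revision extended capability and log its contents. */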
static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;

	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
	pdev->rev_cap = reg;

	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
		 readl(&pdev->rev_cap->ctrl_revision),
		 readl(&pdev->rev_cap->rtl_revision),
		 readl(&pdev->rev_cap->ep_supported),
		 readl(&pdev->rev_cap->rx_buff_size),
		 readl(&pdev->rev_cap->tx_buff_size));
}
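
/*
 * One-time controller bring-up: map the capability, operational and runtime
 * register blocks, halt and reset the controller, set up DMA masks and
 * allocate the driver's memory structures.
 */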
static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
	int ret;
	u32 reg;

	pdev->cap_regs = pdev->regs;
	pdev->op_regs = pdev->regs +
		HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
	pdev->run_regs = pdev->regs +
		(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);

	/* Cache read-only capability registers. */
	pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
	pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
	pdev->hci_version = HC_VERSION(pdev->hcc_params);
	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);

	cdnsp_get_rev_cap(pdev);

	/* Make sure the Device Controller is halted. */
	ret = cdnsp_halt(pdev);
	if (ret)
		return ret;

	/* Reset the internal controller memory state and registers. */
	ret = cdnsp_reset(pdev);
	if (ret)
		return ret;

	/*
	 * Set dma_mask and coherent_dma_mask to 64 bits
	 * if the controller supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid errors when a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
	}

	spin_lock_init(&pdev->lock);

	ret = cdnsp_mem_init(pdev);
	if (ret)
		return ret;

	/*
	 * Software workaround for U1: after the transition to U1 the
	 * controller starts gating the clock, which in some cases causes
	 * the controller to get stuck.
	 */
	reg = readl(&pdev->port3x_regs->mode_2);
	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
	writel(reg, &pdev->port3x_regs->mode_2);

	return 0;
}
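
/*
 * Device role start callback: allocate the cdnsp_device, initialize the
 * controller and endpoints, then register the UDC and its interrupt
 * handlers. Error paths unwind in reverse order of setup.
 */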
static int __cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdnsp_device *pdev;
	u32 max_speed;
	int ret = -ENOMEM;

	cdns_drd_gadget_on(cdns);

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pm_runtime_get_sync(cdns->dev);

	cdns->gadget_dev = pdev;
	pdev->dev = cdns->dev;
	pdev->regs = cdns->dev_regs;
	max_speed = usb_get_maximum_speed(cdns->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to SuperSpeed Plus. */
		max_speed = USB_SPEED_SUPER_PLUS;
		break;
	}

	pdev->gadget.ops = &cdnsp_gadget_ops;
	pdev->gadget.name = "cdnsp-gadget";
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.sg_supported = 1;
	pdev->gadget.max_speed = max_speed;
	pdev->gadget.lpm_capable = 1;

	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!pdev->setup_buf)
		goto free_pdev;

	/*
	 * The controller supports unaligned buffers, but aligned buffers
	 * should improve performance.
	 */
	pdev->gadget.quirk_ep_out_aligned_size = true;

	ret = cdnsp_gen_setup(pdev);
	if (ret) {
		dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
		goto free_setup;
	}

	ret = cdnsp_gadget_init_endpoints(pdev);
	if (ret) {
		dev_err(pdev->dev, "failed to initialize endpoints\n");
		goto halt_pdev;
	}

	ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
	if (ret) {
		dev_err(pdev->dev, "failed to register udc\n");
		goto free_endpoints;
	}

	ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
					cdnsp_irq_handler,
					cdnsp_thread_irq_handler, IRQF_SHARED,
					dev_name(pdev->dev), pdev);
	if (ret)
		goto del_gadget;

	return 0;

del_gadget:
	usb_del_gadget_udc(&pdev->gadget);
free_endpoints:
	cdnsp_gadget_free_endpoints(pdev);
halt_pdev:
	cdnsp_halt(pdev);
	cdnsp_reset(pdev);
	cdnsp_mem_cleanup(pdev);
free_setup:
	kfree(pdev->setup_buf);
free_pdev:
	kfree(pdev);

	return ret;
}
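
/* Device role stop callback: tear down everything set up in init. */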
static void cdnsp_gadget_exit(struct cdns *cdns)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;

	devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
	pm_runtime_mark_last_busy(cdns->dev);
	pm_runtime_put_autosuspend(cdns->dev);
	usb_del_gadget_udc(&pdev->gadget);
	cdnsp_gadget_free_endpoints(pdev);
	cdnsp_mem_cleanup(pdev);
	kfree(pdev);
	cdns->gadget_dev = NULL;
	cdns_drd_gadget_off(cdns);
}
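
/*
 * Role suspend: unless the link is already in U3, disconnect the gadget
 * and stop the controller under the device lock.
 */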
static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;
	unsigned long flags;

	if (pdev->link_state == XDEV_U3)
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	cdnsp_disconnect_gadget(pdev);
	cdnsp_stop(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}
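
/*
 * Role resume: restart the controller at the lowest of the gadget driver's
 * and the controller's maximum speeds, and issue a remote wakeup if the
 * link was left in U3.
 */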
static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;
	enum usb_device_speed max_speed;
	unsigned long flags;
	int ret;

	if (!pdev->gadget_driver)
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	max_speed = pdev->gadget_driver->max_speed;

	/* Limit speed if necessary. */
	max_speed = min(max_speed, pdev->gadget.max_speed);

	ret = cdnsp_run(pdev, max_speed);

	if (pdev->link_state == XDEV_U3)
		__cdnsp_gadget_wakeup(pdev);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/**
 * cdnsp_gadget_init - initialize device structure
 * @cdns: cdnsp instance
 *
 * This function registers the gadget role driver with the cdns core.
 */
int cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdns_role_driver *rdrv;

	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start	= __cdnsp_gadget_init;
	rdrv->stop	= cdnsp_gadget_exit;
	rdrv->suspend	= cdnsp_gadget_suspend;
	rdrv->resume	= cdnsp_gadget_resume;
	rdrv->state	= CDNS_ROLE_STATE_INACTIVE;
	rdrv->name	= "gadget";

	cdns->roles[USB_ROLE_DEVICE] = rdrv;

	return 0;
}