// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kstrtox.h>
#include <linux/list.h>
#include <linux/nls.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/io-64-nonatomic-lo-hi.h>

#include <asm/byteorder.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

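/*
 * Build the DbC string descriptors and return their lengths packed into one
 * 32-bit value: string0 length in the low byte, then the manufacturer,
 * product and serial descriptor lengths in the successively higher bytes,
 * as stored in the DbC Info Context length field.
 */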
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
	string_length += 4;

	return string_length;
}

static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}

static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

static void trb_to_noop(union xhci_trb *trb)
{
	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request *req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

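/*
 * Write one TRB at the current enqueue position. DbC rings use a single
 * segment, so when the advanced enqueue pointer lands on the link TRB the
 * link TRB's cycle bit is toggled, the pointer wraps back to the start of
 * the segment, and the ring's cycle state is flipped.
 */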
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

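/*
 * Queue a transfer request on a DbC bulk endpoint. Requests are only
 * accepted while the DbC is in the configured state; otherwise -ESHUTDOWN
 * is returned. The event handling work is kicked so the transfer is
 * processed promptly.
 */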
int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}

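/* DbC uses a single-entry Event Ring Segment Table covering one segment */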
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
	dbc->string = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

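/*
 * Enable the debug capability: clear the control register and wait for the
 * DbC enable bit to read back as zero, set up DbC memory, then set the
 * enable bits and wait for the controller to latch them. Called with
 * dbc->lock held.
 */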
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}

static void
handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
{
	if (halted) {
		dev_info(dbc->dev, "DbC Endpoint halted\n");
		dep->halted = 1;

	} else if (dep->halted) {
		dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
		dep->halted = 0;

		if (!list_empty(&dep->list_pending))
			writel(DBC_DOOR_BELL_TARGET(dep->direction),
			       &dbc->regs->doorbell);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

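/*
 * Handle a transfer event: locate the pending request whose TRB DMA address
 * matches the event, derive a completion status from the completion code,
 * and give the request back. Stall events need special handling, see the
 * comment in the COMP_STALL_ERROR case below.
 */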
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	struct xhci_ep_ctx *ep_ctx;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx = (ep_id == EPID_OUT) ?
			dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring = dep->ring;

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when host sends a
		 * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
		 * active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will later
		 * start processing TRBs starting from this 'STALLED' TRB,
		 * causing TRBs and requests to be out of sync.
		 *
		 * If STALL event shows some bytes were transferred then assume
		 * it's an actual transfer issue and give back the request.
		 * In this case mark the TRB as No-Op to avoid hw from using the
		 * TRB again.
		 */
		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

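/*
 * Run the DbC state machine and drain the event ring. Returns EVT_GSER when
 * the port becomes configured, EVT_DISC on cable unplug or port reset,
 * EVT_ERR if events cannot be handled in the current state, and EVT_DONE
 * otherwise. Called with dbc->lock held.
 */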
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}
		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}
		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

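/*
 * Delayed-work handler: process DbC events, notify the function driver of
 * configure/disconnect transitions, and reschedule itself. The poll interval
 * drops to 1 ms while transfer requests are pending.
 */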
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	unsigned int poll_interval;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/* set fast poll rate if there are pending data transfers */
		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    !list_empty(&dbc->eps[BULK_IN].list_pending))
			poll_interval = 1;
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
}

static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
};

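/*
 * The "dbc" sysfs attribute reports the current DbC state and accepts
 * "enable" or "disable" to start or stop the debug capability.
 */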
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
		return sysfs_emit(buf, "unknown\n");

	return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;
	struct xhci_dbc *dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (sysfs_streq(buf, "enable"))
		xhci_dbc_start(dbc);
	else if (sysfs_streq(buf, "disable"))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static ssize_t dbc_idVendor_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idVendor);
}

static ssize_t dbc_idVendor_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_idProduct_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idProduct);
}

static ssize_t dbc_idProduct_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu)) | value;
	writel(dev_info, ptr);
	return size;
}

static ssize_t dbc_bcdDevice_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
}

static ssize_t dbc_bcdDevice_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
}

static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u8 value;
	int ret;

	/* bInterfaceProtocol is 8 bit, but... */
	ret = kstrtou8(buf, 0, &value);
	if (ret)
		return ret;

	/* ...xhci only supports values 0 and 1 */
	if (value > 1)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bInterfaceProtocol = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffu)) | value;
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_poll_interval_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%u\n", dbc->poll_interval);
}

static ssize_t dbc_poll_interval_ms_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	u32 value;
	int ret;

	ret = kstrtou32(buf, 0, &value);
	if (ret || value > DBC_POLL_INTERVAL_MAX)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	dbc->poll_interval = value;

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	return size;
}

static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
static DEVICE_ATTR_RW(dbc_poll_interval_ms);

static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	&dev_attr_dbc_poll_interval_ms.attr,
	NULL
};
ATTRIBUTE_GROUPS(dbc_dev);

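/*
 * Allocate and initialize a DbC instance for the given register block and
 * register its sysfs attributes. Fails if the debug capability is already
 * enabled.
 */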
struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
	struct xhci_dbc *dbc;
	int ret;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return NULL;

	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;
	dbc->idProduct = DBC_PRODUCT_ID;
	dbc->idVendor = DBC_VENDOR_ID;
	dbc->bcdDevice = DBC_DEVICE_REV;
	dbc->bInterfaceProtocol = DBC_PROTOCOL;
	dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
	if (ret)
		goto err;

	return dbc;
err:
	kfree(dbc);
	return NULL;
}

/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;
	/* stop hw, stop wq and call dbc->ops->stop() */
	xhci_dbc_stop(dbc);

	/* remove sysfs files */
	sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);

	kfree(dbc);
}

int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	struct device *dev;
	void __iomem *base;
	int ret;
	int dbc_cap_offs;

	/* create all parameters needed resembling a dbc device */
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	/* already allocated and in use */
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}

void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */

int xhci_dbc_init(void)
{
	return dbc_tty_init();
}

void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}