qset.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

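	/*
	 * Allocate the qset from the coherent DMA pool: the hardware
	 * refers to qsets by their DMA addresses, so both the CPU
	 * pointer and the DMA address (kept in qset_dma below) are
	 * needed.
	 */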
	qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @whc: the WHCI host controller
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb: an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;
	uint8_t phy_rate;

	is_out = usb_pipeout(urb->pipe);

	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	/*
	 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
	 * the maximum supported by the device for other endpoints
	 * (unless limited by the user).
	 */
	if (usb_pipecontrol(urb->pipe))
		phy_rate = UWB_PHY_RATE_53;
	else {
		uint16_t phy_rates;

		phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
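		/*
		 * fls() gives the position of the highest set bit, so
		 * this picks the fastest PHY rate the device
		 * advertises in wPHYRates.
		 */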
		phy_rate = fls(phy_rates) - 1;
		if (phy_rate > whc->wusbhc.phy_rate)
			phy_rate = whc->wusbhc.phy_rate;
	}

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from? Why
	 * doesn't the chip know what Tx power to use? It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE(phy_rate)
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);
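
	/*
	 * The initial window has one bit set per packet in a
	 * maximum-sized burst, e.g. max_burst == 4 gives 0xf.
	 */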
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;

	qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
	qset->qh.err_count = 0;
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window. This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
	qset->reset = 0;

	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
			  gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(whc, qset, urb);
	}
	return qset;
}

void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 0;
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer. However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
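
			/*
			 * iAlt is the slot the hardware jumps to after
			 * the final qTD of this URB, e.g. with
			 * WHCI_QSET_TD_MAX == 8, td_end == 6 and three
			 * qTDs remaining, iAlt = (6 + 3) % 8 = 1.
			 */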
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
	qset->qtd[qset->td_start].status = 0;

	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}

static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
	struct scatterlist *sg;
	void *bounce;
	size_t remaining, offset;

	bounce = std->bounce_buf;
	remaining = std->len;

	sg = std->bounce_sg;
	offset = std->bounce_offset;

	while (remaining) {
		size_t len;

		len = min(sg->length - offset, remaining);
		memcpy(sg_virt(sg) + offset, bounce, len);

		bounce += len;
		remaining -= len;

		offset += len;
		if (offset >= sg->length) {
			sg = sg_next(sg);
			offset = 0;
		}
	}
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->bounce_buf) {
		bool is_out = usb_pipeout(std->urb->pipe);
		dma_addr_t dma_addr;
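
		/*
		 * If a page list was built, std->dma_addr points at
		 * the page list mapping, so the bounce buffer's own
		 * DMA address has to be recovered from the first
		 * page-list entry.
		 */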
		if (std->num_pointers)
			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
		else
			dma_addr = std->dma_addr;

		dma_unmap_single(whc->wusbhc.dev, dma_addr,
				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (!is_out)
			qset_copy_bounce_to_sg(whc, std);

		kfree(std->bounce_buf);
	}
	if (std->pl_virt) {
		if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
					 std->num_pointers * sizeof(struct whc_page_list_entry),
					 DMA_TO_DEVICE);
		kfree(std->pl_virt);
		std->pl_virt = NULL;
	}
	kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}

static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t pl_len;
	int p;

	/* Short buffers don't need a page list. */
	if (std->len <= WHCI_PAGE_SIZE) {
		std->num_pointers = 0;
		return 0;
	}
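
	/*
	 * One page-list entry is needed for every WHCI page the
	 * buffer touches.  For example (assuming 4 KiB WHCI pages),
	 * an 8000 byte buffer starting 1 KiB into a page touches
	 * three pages and so needs three entries.
	 */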
	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
	ep = dma_addr + std->len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
		kfree(std->pl_virt);
		return -EFAULT;
	}

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}

/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}

static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb, gfp_t mem_flags)
{
	struct whc_std *std;

	std = kzalloc(sizeof(struct whc_std), mem_flags);
	if (std == NULL)
		return NULL;

	std->urb = urb;
	std->qtd = NULL;

	INIT_LIST_HEAD(&std->list_node);
	list_add_tail(&std->list_node, &qset->stds);

	return std;
}

static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
			   gfp_t mem_flags)
{
	size_t remaining;
	struct scatterlist *sg;
	int i;
	int ntds = 0;
	struct whc_std *std = NULL;
	struct whc_page_list_entry *new_pl_virt;
	dma_addr_t prev_end = 0;
	size_t pl_len;
	int p = 0;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		dma_addr_t dma_addr;
		size_t dma_remaining;
		dma_addr_t sp, ep;
		int num_pointers;

		if (remaining == 0) {
			break;
		}

		dma_addr = sg_dma_address(sg);
		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

		while (dma_remaining) {
			size_t dma_len;

			/*
			 * We can use the previous std (if it exists) provided that:
			 * - the previous one ended on a page boundary.
			 * - the current one begins on a page boundary.
			 * - the previous one isn't full.
			 *
			 * If a new std is needed but the previous one
			 * was not a whole number of packets then this
			 * sg list cannot be mapped onto multiple
			 * qTDs.  Return an error and let the caller
			 * sort it out.
			 */
			if (!std
			    || (prev_end & (WHCI_PAGE_SIZE-1))
			    || (dma_addr & (WHCI_PAGE_SIZE-1))
			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
				if (std && std->len % qset->max_packet != 0)
					return -EINVAL;

				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL) {
					return -ENOMEM;
				}

				ntds++;
				p = 0;
			}

			dma_len = dma_remaining;

			/*
			 * If the remainder of this element doesn't
			 * fit in a single qTD, limit the qTD to a
			 * whole number of packets.  This allows the
			 * remainder to go into the next qTD.
			 */
			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
				dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
					* qset->max_packet - std->len;
			}

			std->len += dma_len;
			std->ntds_remaining = -1; /* filled in later */

			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
			ep = dma_addr + dma_len;
			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
			std->num_pointers += num_pointers;

			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

			new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
			if (new_pl_virt == NULL) {
				kfree(std->pl_virt);
				std->pl_virt = NULL;
				return -ENOMEM;
			}
			std->pl_virt = new_pl_virt;

			for (; p < std->num_pointers; p++) {
				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
			}

			prev_end = dma_addr = ep;
			dma_remaining -= dma_len;
			remaining -= dma_len;
		}
	}

	/* Now the number of stds is known, go back and fill in
	   std->ntds_remaining. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining == -1) {
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
						       pl_len, DMA_TO_DEVICE);
			if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
				return -EFAULT;
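			/*
			 * Iterating in transfer order, the first sTD
			 * of the URB gets the full qTD count and the
			 * last ends up with ntds_remaining == 1.
			 */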
			std->ntds_remaining = ntds--;
		}
	}
	return 0;
}

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
				     struct urb *urb, gfp_t mem_flags)
{
	bool is_out = usb_pipeout(urb->pipe);
	size_t max_std_len;
	size_t remaining;
	int ntds = 0;
	struct whc_std *std = NULL;
	void *bounce = NULL;
	struct scatterlist *sg;
	int i;

	/* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
	max_std_len = qset->max_burst * qset->max_packet;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		size_t len;
		size_t sg_remaining;
		void *orig;

		if (remaining == 0) {
			break;
		}

		sg_remaining = min_t(size_t, remaining, sg->length);
		orig = sg_virt(sg);

		while (sg_remaining) {
			if (!std || std->len == max_std_len) {
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;

				std->bounce_buf = kmalloc(max_std_len, mem_flags);
				if (std->bounce_buf == NULL)
					return -ENOMEM;

				std->bounce_sg = sg;
				std->bounce_offset = orig - sg_virt(sg);
				bounce = std->bounce_buf;
				ntds++;
			}
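
			/*
			 * Data is copied into the bounce buffer here
			 * only for OUT transfers; for IN transfers
			 * qset_free_std() copies the bounce buffer
			 * back into the sg list on completion.
			 */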
			len = min(sg_remaining, max_std_len - std->len);

			if (is_out)
				memcpy(bounce, orig, len);

			std->len += len;
			std->ntds_remaining = -1; /* filled in later */

			bounce += len;
			orig += len;
			sg_remaining -= len;
			remaining -= len;
		}
	}

	/*
	 * For each of the new sTDs, map the bounce buffers, create
	 * page lists (if necessary), and fill in std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining != -1)
			continue;

		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
			return -EFAULT;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			return -ENOMEM;

		std->ntds_remaining = ntds--;
	}

	return 0;
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be required.
 * At least one qTD (and sTD) is required even if the transfer has no
 * data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
		 gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;
	int ret;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

	if (urb->num_sgs) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			qset_free_stds(qset, urb);
			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
		}
		if (ret < 0)
			goto err_no_mem;
		return 0;
	}

	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;
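
	/*
	 * No sg list: chop the linear transfer buffer into sTDs of at
	 * most QTD_MAX_XFER_SIZE bytes; a zero-length transfer still
	 * gets one (empty) sTD.
	 */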
	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std = qset_new_std(whc, qset, urb, mem_flags);
		if (std == NULL)
			goto err_no_mem;

		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			goto err_no_mem;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_no_mem:
	qset_free_stds(qset, urb);
	return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
		     struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE;
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD; if
 * the urb is now completely transferred or (for an IN transfer) the
 * last packet flag (LPF) is set, the transfer is complete and the urb
 * is returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
			  struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);
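
	/*
	 * The qTD's length field holds the bytes left untransferred,
	 * so the difference from the original sTD length is what was
	 * actually moved.
	 */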
	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);
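
	/*
	 * Deactivate any remaining qTDs (they may belong to other
	 * URBs queued on this endpoint) so the qset can be removed
	 * from the schedule.
	 */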
	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}

void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}