usb.c

/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;

		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
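
/* Read-modify-write a register: update the bits in @mask with @val while
 * preserving the remaining register bits; read and write happen under a
 * single usb_ctrl_mtx critical section.
 */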
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}
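
/* Copy a buffer to consecutive registers as a sequence of 32-bit vendor
 * requests, stopping early on the first failed transfer.
 */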
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
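
/* Write a 32-bit value as two 16-bit vendor requests: the low half goes
 * to @offset, the high half to @offset + 2.
 */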
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);
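
/* Walk the active interface descriptor and record the bulk IN/OUT
 * endpoint numbers and max packet sizes; fail if the expected number of
 * endpoints is not found.
 */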
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}
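
/* Populate the rx urb scatterlist with up to @nsgs page fragments of
 * @sglen bytes each; on a partial refill, free the leftover fragments
 * from the previous fill and shrink the scatterlist accordingly.
 */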
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = netdev_alloc_frag(len);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);

void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	struct scatterlist *sg;
	int i;

	for (i = 0; i < urb->num_sgs; i++) {
		sg = &urb->sg[i];
		if (!sg)
			continue;

		skb_free_frag(sg_virt(sg));
	}
	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
			  complete_fn, context);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}
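
/* Read the DMA length out of the rx descriptor header and sanity check
 * it against the received payload size and 4-byte alignment.
 */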
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}
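
/* Build an skb around the first scatterlist fragment and attach any
 * remaining fragments as paged data, then hand it to the driver rx path.
 * Returns the number of fragments consumed, i.e. the ones that need to
 * be refilled before the urb is resubmitted.
 */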
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}
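
/* rx urb completion handler: on success, advance the queue tail and let
 * the rx tasklet process the buffer; fatal statuses simply drop the urb.
 */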
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}
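
/* rx bottom half: drain completed buffers from the queue, refill their
 * scatterlists and resubmit the urbs to the bulk IN endpoint.
 */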
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int err, nsgs, buf_len = q->buf_size;
	struct mt76u_buf *buf;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		nsgs = mt76u_process_rx_entry(dev, buf->urb);
		if (nsgs > 0) {
			err = mt76u_fill_rx_sg(dev, buf, nsgs,
					       buf_len,
					       SKB_WITH_OVERHEAD(buf_len));
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
	struct sk_buff *iter, *last = skb;
	u32 info, pad;

	/* Buffer layout:
	 *	|   4B   | xfer len |      pad       |  4B  |
	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
	 *
	 * length field of TXINFO should be set to 'xfer len'.
	 */
	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
	put_unaligned_le32(info, skb_push(skb, sizeof(info)));

	pad = round_up(skb->len, 4) + 4 - skb->len;
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (unlikely(pad)) {
		if (__skb_pad(last, pad, true))
			return -ENOMEM;
		__skb_put(last, pad);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
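
/* tx bottom half: reap completed tx buffers in order for each AC queue,
 * kick the mac80211 tx scheduler, wake queues that have drained below
 * the threshold and arm the tx status work.
 */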
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}
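
/* Delayed work polling the hardware for tx status; reschedules itself
 * for as long as the device keeps reporting fresh status data.
 */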
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}
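
/* Map an skb, including its frag list, onto the tx urb scatterlist,
 * capped at MT_SG_MAX_SIZE entries.
 */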
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}
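
/* Queue an skb for transmission: let the driver build the tx descriptor,
 * map the payload into the urb scatterlist and claim the next queue
 * entry; the urb itself is submitted later by mt76u_tx_kick().
 */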
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	err = mt76u_tx_build_sg(skb, buf->urb);
	if (err < 0)
		return err;

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}
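
/* Submit every urb queued between q->first and q->tail; stop on the
 * first submission error and flag device removal on -ENODEV.
 */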
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	size_t size;
	int i, j;

	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = q2hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");