/* mt76.h */
  1. /*
  2. * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #ifndef __MT76_H
  17. #define __MT76_H
  18. #include <linux/kernel.h>
  19. #include <linux/io.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/skbuff.h>
  22. #include <linux/leds.h>
  23. #include <linux/usb.h>
  24. #include <net/mac80211.h>
  25. #include "util.h"
/* Default DMA ring sizes and the RX DMA buffer size. */
#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048

/* Forward declarations; full definitions appear later in this header. */
struct mt76_dev;
struct mt76_wcid;
/*
 * Bus abstraction: register access primitives implemented per transport.
 * All of the mt76_rr/mt76_wr/... macros below dispatch through these hooks.
 */
struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);	/* register read */
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);	/* register write */
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	/* bulk copy of a buffer into device address space at offset */
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};
/*
 * TX queue identifiers.  The first four are pinned to the mac80211
 * access-category values so AC indices can be used directly.
 */
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,	/* power-save delivery — TODO confirm */
	MT_TXQ_MCU,	/* firmware/MCU command queue */
	MT_TXQ_BEACON,
	MT_TXQ_CAB,	/* presumably "content after beacon" traffic — verify */
	__MT_TXQ_MAX
};
/* RX queue identifiers: one data ring, one MCU/event ring. */
enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};
/* A single DMA-mapped buffer fragment handed to the queue code. */
struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};
/* USB transfer buffer: wraps a URB plus completion state. */
struct mt76u_buf {
	struct mt76_dev *dev;
	struct urb *urb;
	size_t len;
	bool done;	/* set when the URB has completed */
};
/*
 * Per-descriptor software state.  The unions reflect usage: RX entries
 * track a raw buffer, TX entries an skb; DMA TX entries keep a txwi
 * cache slot while USB TX entries keep a mt76u_buf.
 */
struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct mt76u_buf ubuf;
	};
	bool schedule;	/* re-run the TX scheduler after this entry completes */
};
/*
 * Hardware ring control registers, mapped at a fixed layout
 * (hence __packed/__aligned) and accessed through an __iomem pointer.
 */
struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;	/* index written by the driver */
	u32 dma_idx;	/* index advanced by the hardware */
} __packed __aligned(4);
/* One DMA ring (TX or RX) plus its software bookkeeping. */
struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;	/* ring control registers */
	spinlock_t lock;
	struct mt76_queue_entry *entry;	/* per-descriptor software state */
	struct mt76_desc *desc;		/* descriptor array (DMA coherent) */
	struct list_head swq;		/* mt76_txq entries scheduled on this ring */
	int swq_queued;
	u16 first;
	u16 head;
	u16 tail;
	int ndesc;	/* number of descriptors in the ring */
	int queued;	/* descriptors currently in use */
	int buf_size;
	u8 buf_offset;
	u8 hw_idx;	/* hardware queue index */
	dma_addr_t desc_dma;
	struct sk_buff *rx_head;	/* presumably a partially-assembled RX frame — verify */
};
/*
 * Queue backend operations, implemented per transport (DMA or USB) and
 * invoked through the mt76_queue_* macros below.
 */
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);
	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
	/* push nbufs pre-mapped fragments onto the ring as one frame */
	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);
	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);
	/* pop one completed entry; flush forces reclaim of pending entries */
	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);
	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);
	/* notify hardware that new descriptors are available */
	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};
/* Bit numbers for mt76_wcid.flags (used with set_bit/test_bit style access). */
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};
/*
 * Per-peer (wireless client id) state: hardware station table index,
 * key/PN tracking and cached TX rate configuration.
 */
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];	/* RX reorder state per TID */
	struct work_struct aggr_work;
	unsigned long flags;	/* MT_WCID_FLAG_* bits */
	u8 idx;			/* hardware WCID table index */
	u8 hw_key_idx;
	u8 sta:1;		/* set when embedded in an ieee80211_sta (see wcid_to_sta) */
	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];	/* last RX packet number per TID */
	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;		/* IV is generated in software */
};
/*
 * Driver-private part of an ieee80211_txq (see mtxq_to_txq): links the
 * mac80211 queue to a hardware ring and its owning WCID.
 */
struct mt76_txq {
	struct list_head list;		/* membership in mt76_queue.swq */
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;
	struct sk_buff_head retry_q;	/* frames pulled but not yet transmitted */
	u16 agg_ssn;
	bool send_bar;			/* send a BlockAckReq before next frame */
	bool aggr;			/* aggregation session active */
};
/* Recyclable TX descriptor (txwi) slot; kept on mt76_dev.txwi_cache. */
struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};
/*
 * RX aggregation reorder state for one TID.  reorder_buf is a flexible
 * array sized by `size` at allocation time.
 */
struct mt76_rx_tid {
	struct rcu_head rcu_head;
	struct mt76_dev *dev;
	spinlock_t lock;
	struct delayed_work reorder_work;	/* flushes stale held frames */
	u16 head;	/* next expected sequence number */
	u16 size;	/* reorder window size */
	u16 nframes;	/* frames currently buffered */
	u8 started:1, stopped:1, timer_pending:1;
	struct sk_buff *reorder_buf[];
};
/* Bit numbers for mt76_dev.state. */
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_OFFCHANNEL,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_MORE_STATS,
};
/* Band capabilities of the hardware. */
struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};
/*
 * Callbacks a chip driver provides to the mt76 core.
 */
struct mt76_driver_ops {
	u16 txwi_size;	/* size of the per-frame TX descriptor */
	void (*update_survey)(struct mt76_dev *dev);
	/* fill the txwi for an skb about to be queued */
	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);
	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);
	/* poll hardware TX status; sets *update when rate info changed — TODO confirm */
	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);
	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
	/* powersave state change notification for a station */
	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
};
/* Cumulative channel activity counters, used for survey reporting. */
struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};
/* Supported band plus a per-channel state array parallel to sband.channels. */
struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};
/*
 * addr req mask: the top two bits of an address passed to the USB
 * vendor-request helpers select the target (EEPROM / config space).
 */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))

/* USB vendor request codes understood by the device. */
enum mt_vendor_req {
	MT_VEND_DEV_MODE = 0x1,
	MT_VEND_WRITE = 0x2,
	MT_VEND_MULTI_WRITE = 0x6,
	MT_VEND_MULTI_READ = 0x7,
	MT_VEND_READ_EEPROM = 0x9,
	MT_VEND_WRITE_FCE = 0x42,
	MT_VEND_WRITE_CFG = 0x46,
	MT_VEND_READ_CFG = 0x47,
};
/* Logical USB IN endpoints (indexes into mt76_usb.in_ep). */
enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};
/* Logical USB OUT endpoints (indexes into mt76_usb.out_ep). */
enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};
#define MT_SG_MAX_SIZE		8	/* max scatter-gather segments per URB */
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024

/* USB transport state embedded in mt76_dev. */
struct mt76_usb {
	struct mutex usb_ctrl_mtx;	/* serializes vendor control transfers */
	u8 data[32];			/* bounce buffer for control transfers */

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct delayed_work stat_work;	/* periodic TX status polling */

	u8 out_ep[__MT_EP_OUT_MAX];	/* endpoint addresses, see mt76u_out_ep */
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];	/* endpoint addresses, see mt76u_in_ep */
	u16 in_max_packet;

	/* MCU command channel: one command in flight at a time */
	struct mt76u_mcu {
		struct mutex mutex;
		struct completion cmpl;
		struct mt76u_buf res;	/* response buffer */
		u32 msg_seq;		/* sequence number for matching responses */
	} mcu;
};
/*
 * Core per-device state shared by all mt76 chip drivers.  Chip drivers
 * embed this as a member named "mt76" (the accessor macros below rely
 * on that naming).
 */
struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;	/* operating channel (vs. scan/offchannel) */

	spinlock_t lock;
	spinlock_t cc_lock;	/* protects channel activity counters */
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	void __iomem *regs;
	struct device *dev;

	struct net_device napi_dev;	/* dummy netdev backing the NAPI contexts */
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;	/* free mt76_txwi_cache entries */
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	wait_queue_head_t tx_wait;

	u8 macaddr[ETH_ALEN];
	u32 rev;	/* chip id in the high 16 bits, revision in the low 16 */
	unsigned long state;	/* MT76_STATE_* bits */

	u8 antenna_mask;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	u32 debugfs_reg;	/* register address selected via debugfs */

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;	/* LED is active-low */
	u8 led_pin;

	struct mt76_usb usb;
};
/* PHY modulation types as reported/programmed in rate fields. */
enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,	/* HT greenfield */
	MT_PHY_TYPE_VHT,
};
/*
 * Per-rate TX power table.  The union lets code address entries either
 * by modulation group or as one flat array of 38 values.
 */
struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};
/*
 * Per-frame RX metadata carried in the skb control buffer before it is
 * handed to mac80211; several fields mirror ieee80211_rx_status.
 */
struct mt76_rx_status {
	struct mt76_wcid *wcid;	/* sender, if known */
	unsigned long reorder_time;	/* deadline for reorder buffer release */
	u8 iv[6];
	u8 aggr:1;	/* frame is part of an A-MPDU */
	u8 tid;
	u16 seqno;
	u16 freq;
	u32 flag;	/* RX_FLAG_* for mac80211 — TODO confirm */
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	u8 signal;
	u8 chains;	/* bitmask of chains with valid chain_signal */
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
  319. #define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
  320. #define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
  321. #define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
  322. #define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
  323. #define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
  324. #define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
  325. #define mt76_get_field(_dev, _reg, _field) \
  326. FIELD_GET(_field, mt76_rr(dev, _reg))
  327. #define mt76_rmw_field(_dev, _reg, _field, _val) \
  328. mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
  329. #define mt76_hw(dev) (dev)->mt76.hw
  330. bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
  331. int timeout);
  332. #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
  333. bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
  334. int timeout);
  335. #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
  336. void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
  337. static inline u16 mt76_chip(struct mt76_dev *dev)
  338. {
  339. return dev->rev >> 16;
  340. }
  341. static inline u16 mt76_rev(struct mt76_dev *dev)
  342. {
  343. return dev->rev & 0xffff;
  344. }
/* Variants of the above for chip-driver structs embedding "mt76". */
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

/* Queue operation dispatch through the installed queue_ops backend. */
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
  353. static inline struct mt76_channel_state *
  354. mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
  355. {
  356. struct mt76_sband *msband;
  357. int idx;
  358. if (c->band == NL80211_BAND_2GHZ)
  359. msband = &dev->sband_2g;
  360. else
  361. msband = &dev->sband_5g;
  362. idx = c - &msband->sband.channels[0];
  363. return &msband->chan[idx];
  364. }
/* Device lifecycle: allocate, register with mac80211, tear down. */
struct mt76_dev *mt76_alloc_device(unsigned int size,
				   const struct ieee80211_ops *ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
  373. /* increment with wrap-around */
  374. static inline int mt76_incr(int val, int size)
  375. {
  376. return (val + 1) & (size - 1);
  377. }
  378. /* decrement with wrap-around */
  379. static inline int mt76_decr(int val, int size)
  380. {
  381. return (val - 1) & (size - 1);
  382. }
  383. /* Hardware uses mirrored order of queues with Q3
  384. * having the highest priority
  385. */
  386. static inline u8 q2hwq(u8 q)
  387. {
  388. return q ^ 0x3;
  389. }
  390. static inline struct ieee80211_txq *
  391. mtxq_to_txq(struct mt76_txq *mtxq)
  392. {
  393. void *ptr = mtxq;
  394. return container_of(ptr, struct ieee80211_txq, drv_priv);
  395. }
  396. static inline struct ieee80211_sta *
  397. wcid_to_sta(struct mt76_wcid *wcid)
  398. {
  399. void *ptr = wcid;
  400. if (!wcid || !wcid->sta)
  401. return NULL;
  402. return container_of(ptr, struct ieee80211_sta, drv_priv);
  403. }
/* TX path entry points. */
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

/* RX path: hand a received frame to the core for the given queue. */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);

/* mac80211 TX queue management and scheduling. */
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);

void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

/* RX aggregation reorder session control. */
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
  440. /* usb */
  441. static inline bool mt76u_urb_error(struct urb *urb)
  442. {
  443. return urb->status &&
  444. urb->status != -ECONNRESET &&
  445. urb->status != -ESHUTDOWN &&
  446. urb->status != -ENOENT;
  447. }
  448. /* Map hardware queues to usb endpoints */
  449. static inline u8 q2ep(u8 qid)
  450. {
  451. /* TODO: take management packets to queue 5 */
  452. return qid + 1;
  453. }
/*
 * Whether scatter-gather URBs can be used on this device's host
 * controller: the bus must support SG tables, and either impose no SG
 * length constraint or be a wireless-USB link.
 */
static inline bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);

	return (udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}
/* USB register / vendor-request access. */
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);

/* USB transport setup / teardown. */
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);

/* URB buffer management and submission. */
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp);
void mt76u_buf_free(struct mt76u_buf *buf);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);

/* USB queue lifecycle. */
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);

/* MCU (firmware) communication over USB. */
int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
			   int data_len, u32 max_payload, u32 offset);
void mt76u_mcu_complete_urb(struct urb *urb);
struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
		       int cmd, bool wait_resp);
void mt76u_mcu_fw_reset(struct mt76_dev *dev);
int mt76u_mcu_init_rx(struct mt76_dev *dev);
  491. #endif