mt76x2_mac.c

/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
#include "mt76x2_trace.h"
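
/*
 * Program the BSSID for one of the eight APC slots: the low 32 bits of
 * the address go into MT_MAC_APC_BSSID_L, the remaining 16 bits into the
 * address field of MT_MAC_APC_BSSID_H.
 */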
void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
{
        idx &= 7;
        mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
        mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
                       get_unaligned_le16(addr + 4));
}
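
/*
 * Drain TX status reports from the hardware. When called from the IRQ
 * path (irq == true), statuses are buffered in txstatus_fifo for later
 * processing and the loop stops once the fifo is full; otherwise each
 * status is reported immediately.
 */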
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
        struct mt76x2_tx_status stat = {};
        unsigned long flags;
        u8 update = 1;
        bool ret;

        if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
                return;

        trace_mac_txstat_poll(dev);

        while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
                spin_lock_irqsave(&dev->irq_lock, flags);
                ret = mt76x2_mac_load_tx_status(dev, &stat);
                spin_unlock_irqrestore(&dev->irq_lock, flags);

                if (!ret)
                        break;

                trace_mac_txstat_fetch(dev, &stat);

                if (!irq) {
                        mt76x2_send_tx_status(dev, &stat, &update);
                        continue;
                }

                kfifo_put(&dev->txstatus_fifo, stat);
        }
}
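
/*
 * Complete a transmitted frame: poll for any pending TX status first,
 * then stash the wcid/pktid from the TXWI in the skb's tx info so a
 * later status report can be matched back to this frame.
 */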
static void
mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
                        void *txwi_ptr)
{
        struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
        struct mt76x2_txwi *txwi = txwi_ptr;

        mt76x2_mac_poll_tx_status(dev, false);

        txi->tries = 0;
        txi->jiffies = jiffies;
        txi->wcid = txwi->wcid;
        txi->pktid = txwi->pktid;
        trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
        mt76x2_tx_complete(dev, skb);
}
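
/* Report all TX statuses that the IRQ path queued up in txstatus_fifo. */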
void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
{
        struct mt76x2_tx_status stat;
        u8 update = 1;

        while (kfifo_get(&dev->txstatus_fifo, &stat))
                mt76x2_send_tx_status(dev, &stat, &update);
}
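
/*
 * Queue cleanup callback for the mt76 core: entries that carry a TXWI go
 * through the TX status machinery, everything else is simply freed.
 */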
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
                            struct mt76_queue_entry *e, bool flush)
{
        struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);

        if (e->txwi)
                mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
        else
                dev_kfree_skb_any(e->skb);
}
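
/*
 * Copy a beacon frame into beacon memory at the given offset, prefixed
 * by a freshly built TXWI. The space available per slot is the distance
 * between two consecutive beacon offsets.
 */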
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
        int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
        struct mt76x2_txwi txwi;

        if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
                return -ENOSPC;

        mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);

        mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
        offset += sizeof(txwi);
        mt76_wr_copy(dev, offset, skb->data, skb->len);
        return 0;
}
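
/*
 * Write (or, when skb is NULL, zero out) the beacon data for one
 * hardware beacon slot and keep beacon_data_mask in sync. Slots without
 * valid data stay masked out via MT_BCN_BYPASS_MASK so the hardware does
 * not transmit a partially written beacon.
 */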
static int
__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
        int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
        int beacon_addr = dev->beacon_offsets[bcn_idx];
        int ret = 0;
        int i;

        /* Prevent corrupt transmissions during update */
        mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));

        if (skb) {
                ret = mt76_write_beacon(dev, beacon_addr, skb);
                if (!ret)
                        dev->beacon_data_mask |= BIT(bcn_idx);
        } else {
                dev->beacon_data_mask &= ~BIT(bcn_idx);
                for (i = 0; i < beacon_len; i += 4)
                        mt76_wr(dev, beacon_addr + i, 0);
        }

        mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);

        return ret;
}
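
/*
 * Install the beacon for a vif. Occupied slots are packed to the front,
 * so adding or removing one beacon may rewrite the slots of the other
 * vifs; stale trailing slots are cleared afterwards and MBEACON_N is set
 * to the index of the last used slot.
 */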
int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
                          struct sk_buff *skb)
{
        bool force_update = false;
        int bcn_idx = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
                if (vif_idx == i) {
                        force_update = !!dev->beacons[i] ^ !!skb;

                        if (dev->beacons[i])
                                dev_kfree_skb(dev->beacons[i]);

                        dev->beacons[i] = skb;
                        __mt76x2_mac_set_beacon(dev, bcn_idx, skb);
                } else if (force_update && dev->beacons[i]) {
                        __mt76x2_mac_set_beacon(dev, bcn_idx,
                                                dev->beacons[i]);
                }

                bcn_idx += !!dev->beacons[i];
        }

        for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
                if (!(dev->beacon_data_mask & BIT(i)))
                        break;

                __mt76x2_mac_set_beacon(dev, i, NULL);
        }

        mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
                       bcn_idx - 1);
        return 0;
}
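
/*
 * Track which vifs are beaconing. When the mask transitions between
 * empty and non-empty, switch the beacon timer, TBTT generation and the
 * (pre-)TBTT interrupts on or off accordingly; the "reg * en" below
 * writes either 0 or the full enable mask.
 */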
void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
{
        u8 old_mask = dev->beacon_mask;
        bool en;
        u32 reg;

        if (val) {
                dev->beacon_mask |= BIT(vif_idx);
        } else {
                dev->beacon_mask &= ~BIT(vif_idx);
                mt76x2_mac_set_beacon(dev, vif_idx, NULL);
        }

        if (!!old_mask == !!dev->beacon_mask)
                return;

        en = dev->beacon_mask;

        mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
        reg = MT_BEACON_TIME_CFG_BEACON_TX |
              MT_BEACON_TIME_CFG_TBTT_EN |
              MT_BEACON_TIME_CFG_TIMER_EN;
        mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);

        if (en)
                mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
        else
                mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
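
/*
 * Accumulate channel busy/active time for the survey statistics: busy
 * time is read from MT_CH_BUSY, total active time is busy plus the idle
 * time from MT_CH_IDLE.
 */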
void mt76x2_update_channel(struct mt76_dev *mdev)
{
        struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
        struct mt76_channel_state *state;
        u32 active, busy;

        state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);

        busy = mt76_rr(dev, MT_CH_BUSY);
        active = busy + mt76_rr(dev, MT_CH_IDLE);

        spin_lock_bh(&dev->mt76.cc_lock);
        state->cc_busy += busy;
        state->cc_active += active;
        spin_unlock_bh(&dev->mt76.cc_lock);
}
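
/*
 * Periodic MAC housekeeping: update the survey counters, fold the 16 TX
 * aggregation count registers (two 16-bit counters each) into
 * aggr_stats, then re-arm itself after MT_CALIBRATE_INTERVAL.
 */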
void mt76x2_mac_work(struct work_struct *work)
{
        struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
                                              mac_work.work);
        int i, idx;

        mt76x2_update_channel(&dev->mt76);

        for (i = 0, idx = 0; i < 16; i++) {
                u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

                dev->aggr_stats[idx++] += val & 0xffff;
                dev->aggr_stats[idx++] += val >> 16;
        }

        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
                                     MT_CALIBRATE_INTERVAL);
}
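
/*
 * Apply an RTS threshold. A value of ~0 disables RTS/CTS protection; any
 * other value programs the threshold and enables protection control in
 * each of the per-mode protection config registers.
 */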
void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
{
        u32 data = 0;

        if (val != ~0)
                data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
                       MT_PROT_CFG_RTS_THRESH;

        mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

        mt76_rmw(dev, MT_CCK_PROT_CFG,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_OFDM_PROT_CFG,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_MM20_PROT_CFG,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_MM40_PROT_CFG,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_GF20_PROT_CFG,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_GF40_PROT_CFG,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_TX_PROT_CFG6,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_TX_PROT_CFG7,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
        mt76_rmw(dev, MT_TX_PROT_CFG8,
                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}