/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76x2.h"
  18. void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
  19. {
  20. struct mt76_txq *mtxq;
  21. if (!txq)
  22. return;
  23. mtxq = (struct mt76_txq *) txq->drv_priv;
  24. if (txq->sta) {
  25. struct mt76x2_sta *sta;
  26. sta = (struct mt76x2_sta *) txq->sta->drv_priv;
  27. mtxq->wcid = &sta->wcid;
  28. } else {
  29. struct mt76x2_vif *mvif;
  30. mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
  31. mtxq->wcid = &mvif->group_wcid;
  32. }
  33. mt76_txq_init(&dev->mt76, txq);
  34. }
  35. EXPORT_SYMBOL_GPL(mt76x2_txq_init);
  36. int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  37. struct ieee80211_ampdu_params *params)
  38. {
  39. enum ieee80211_ampdu_mlme_action action = params->action;
  40. struct ieee80211_sta *sta = params->sta;
  41. struct mt76x2_dev *dev = hw->priv;
  42. struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
  43. struct ieee80211_txq *txq = sta->txq[params->tid];
  44. u16 tid = params->tid;
  45. u16 *ssn = &params->ssn;
  46. struct mt76_txq *mtxq;
  47. if (!txq)
  48. return -EINVAL;
  49. mtxq = (struct mt76_txq *)txq->drv_priv;
  50. switch (action) {
  51. case IEEE80211_AMPDU_RX_START:
  52. mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
  53. mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
  54. break;
  55. case IEEE80211_AMPDU_RX_STOP:
  56. mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
  57. mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
  58. BIT(16 + tid));
  59. break;
  60. case IEEE80211_AMPDU_TX_OPERATIONAL:
  61. mtxq->aggr = true;
  62. mtxq->send_bar = false;
  63. ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
  64. break;
  65. case IEEE80211_AMPDU_TX_STOP_FLUSH:
  66. case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
  67. mtxq->aggr = false;
  68. ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
  69. break;
  70. case IEEE80211_AMPDU_TX_START:
  71. mtxq->agg_ssn = *ssn << 4;
  72. ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
  73. break;
  74. case IEEE80211_AMPDU_TX_STOP_CONT:
  75. mtxq->aggr = false;
  76. ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
  77. break;
  78. }
  79. return 0;
  80. }
  81. EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
  82. int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  83. struct ieee80211_sta *sta)
  84. {
  85. struct mt76x2_dev *dev = hw->priv;
  86. struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
  87. struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
  88. int ret = 0;
  89. int idx = 0;
  90. int i;
  91. mutex_lock(&dev->mutex);
  92. idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
  93. if (idx < 0) {
  94. ret = -ENOSPC;
  95. goto out;
  96. }
  97. msta->vif = mvif;
  98. msta->wcid.sta = 1;
  99. msta->wcid.idx = idx;
  100. msta->wcid.hw_key_idx = -1;
  101. mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
  102. mt76x2_mac_wcid_set_drop(dev, idx, false);
  103. for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
  104. mt76x2_txq_init(dev, sta->txq[i]);
  105. if (vif->type == NL80211_IFTYPE_AP)
  106. set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
  107. ewma_signal_init(&msta->rssi);
  108. rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
  109. out:
  110. mutex_unlock(&dev->mutex);
  111. return ret;
  112. }
  113. EXPORT_SYMBOL_GPL(mt76x2_sta_add);
  114. int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  115. struct ieee80211_sta *sta)
  116. {
  117. struct mt76x2_dev *dev = hw->priv;
  118. struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
  119. int idx = msta->wcid.idx;
  120. int i;
  121. mutex_lock(&dev->mutex);
  122. rcu_assign_pointer(dev->wcid[idx], NULL);
  123. for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
  124. mt76_txq_remove(&dev->mt76, sta->txq[i]);
  125. mt76x2_mac_wcid_set_drop(dev, idx, true);
  126. mt76_wcid_free(dev->wcid_mask, idx);
  127. mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
  128. mutex_unlock(&dev->mutex);
  129. return 0;
  130. }
  131. EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
  132. void mt76x2_remove_interface(struct ieee80211_hw *hw,
  133. struct ieee80211_vif *vif)
  134. {
  135. struct mt76x2_dev *dev = hw->priv;
  136. mt76_txq_remove(&dev->mt76, vif->txq);
  137. }
  138. EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
/*
 * mac80211 set_key callback: program pairwise keys into the station wcid
 * entry and group keys into the per-vif shared key slots.
 *
 * Returns 0 on success, -EOPNOTSUPP to make mac80211 fall back to
 * software encryption, or the error from the wcid/shared key setup.
 */
int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct mt76x2_dev *dev = hw->priv;
	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
	struct mt76x2_sta *msta;
	struct mt76_wcid *wcid;
	int idx = key->keyidx;
	int ret;

	/* fall back to sw encryption for unsupported ciphers */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/*
	 * The hardware does not support per-STA RX GTK, fall back
	 * to software mode for these.
	 */
	if ((vif->type == NL80211_IFTYPE_ADHOC ||
	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* station present: use its wcid; otherwise the vif group wcid */
	msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
	wcid = msta ? &msta->wcid : &mvif->group_wcid;

	if (cmd == SET_KEY) {
		key->hw_key_idx = wcid->idx;
		wcid->hw_key_idx = idx;
		if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
			/* hw cannot protect mgmt TX for this key: let
			 * software generate the IV as well
			 */
			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
			wcid->sw_iv = true;
		}
	} else {
		if (idx == wcid->hw_key_idx) {
			wcid->hw_key_idx = -1;
			wcid->sw_iv = true;
		}

		/* key = NULL makes the setup calls below clear the entry */
		key = NULL;
	}
	mt76_wcid_key_setup(&dev->mt76, wcid, key);

	if (!msta) {
		/* group key: also mirror into the wcid entry when it is the
		 * active key (or being cleared), then program the shared slot
		 */
		if (key || wcid->hw_key_idx == idx) {
			ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
			if (ret)
				return ret;
		}

		return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
	}

	return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
}
EXPORT_SYMBOL_GPL(mt76x2_set_key);
  197. int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
  198. u16 queue, const struct ieee80211_tx_queue_params *params)
  199. {
  200. struct mt76x2_dev *dev = hw->priv;
  201. u8 cw_min = 5, cw_max = 10, qid;
  202. u32 val;
  203. qid = dev->mt76.q_tx[queue].hw_idx;
  204. if (params->cw_min)
  205. cw_min = fls(params->cw_min);
  206. if (params->cw_max)
  207. cw_max = fls(params->cw_max);
  208. val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
  209. FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
  210. FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
  211. FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
  212. mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
  213. val = mt76_rr(dev, MT_WMM_TXOP(qid));
  214. val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
  215. val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
  216. mt76_wr(dev, MT_WMM_TXOP(qid), val);
  217. val = mt76_rr(dev, MT_WMM_AIFSN);
  218. val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
  219. val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
  220. mt76_wr(dev, MT_WMM_AIFSN, val);
  221. val = mt76_rr(dev, MT_WMM_CWMIN);
  222. val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
  223. val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
  224. mt76_wr(dev, MT_WMM_CWMIN, val);
  225. val = mt76_rr(dev, MT_WMM_CWMAX);
  226. val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
  227. val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
  228. mt76_wr(dev, MT_WMM_CWMAX, val);
  229. return 0;
  230. }
  231. EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
  232. void mt76x2_configure_filter(struct ieee80211_hw *hw,
  233. unsigned int changed_flags,
  234. unsigned int *total_flags, u64 multicast)
  235. {
  236. struct mt76x2_dev *dev = hw->priv;
  237. u32 flags = 0;
  238. #define MT76_FILTER(_flag, _hw) do { \
  239. flags |= *total_flags & FIF_##_flag; \
  240. dev->rxfilter &= ~(_hw); \
  241. dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
  242. } while (0)
  243. mutex_lock(&dev->mutex);
  244. dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
  245. MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
  246. MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
  247. MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
  248. MT_RX_FILTR_CFG_CTS |
  249. MT_RX_FILTR_CFG_CFEND |
  250. MT_RX_FILTR_CFG_CFACK |
  251. MT_RX_FILTR_CFG_BA |
  252. MT_RX_FILTR_CFG_CTRL_RSV);
  253. MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
  254. *total_flags = flags;
  255. mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
  256. mutex_unlock(&dev->mutex);
  257. }
  258. EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
  259. void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
  260. struct ieee80211_vif *vif,
  261. struct ieee80211_sta *sta)
  262. {
  263. struct mt76x2_dev *dev = hw->priv;
  264. struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
  265. struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
  266. struct ieee80211_tx_rate rate = {};
  267. if (!rates)
  268. return;
  269. rate.idx = rates->rate[0].idx;
  270. rate.flags = rates->rate[0].flags;
  271. mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
  272. msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
  273. }
  274. EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
  275. void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
  276. struct sk_buff *skb)
  277. {
  278. struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
  279. void *rxwi = skb->data;
  280. if (q == MT_RXQ_MCU) {
  281. skb_queue_tail(&dev->mcu.res_q, skb);
  282. wake_up(&dev->mcu.wait);
  283. return;
  284. }
  285. skb_pull(skb, sizeof(struct mt76x2_rxwi));
  286. if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
  287. dev_kfree_skb(skb);
  288. return;
  289. }
  290. mt76_rx(&dev->mt76, q, skb);
  291. }
  292. EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);