mt76x2_phy_common.c 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349
  1. /*
  2. * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  3. * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include "mt76x2.h"
  18. #include "mt76x2_eeprom.h"
  19. static void
  20. mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
  21. {
  22. s8 gain;
  23. gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
  24. gain -= offset / 2;
  25. mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
  26. }
  27. static void
  28. mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
  29. {
  30. s8 gain;
  31. gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
  32. gain += offset;
  33. mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
  34. }
  35. void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
  36. {
  37. s8 *gain_adj = dev->cal.rx.high_gain;
  38. mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
  39. mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
  40. mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
  41. mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
  42. }
  43. EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
  44. void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
  45. enum nl80211_band band)
  46. {
  47. u32 pa_mode[2];
  48. u32 pa_mode_adj;
  49. if (band == NL80211_BAND_2GHZ) {
  50. pa_mode[0] = 0x010055ff;
  51. pa_mode[1] = 0x00550055;
  52. mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
  53. mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
  54. if (mt76x2_ext_pa_enabled(dev, band)) {
  55. mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
  56. mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
  57. } else {
  58. mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
  59. mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
  60. }
  61. } else {
  62. pa_mode[0] = 0x0000ffff;
  63. pa_mode[1] = 0x00ff00ff;
  64. if (mt76x2_ext_pa_enabled(dev, band)) {
  65. mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
  66. mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
  67. } else {
  68. mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
  69. mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
  70. }
  71. if (mt76x2_ext_pa_enabled(dev, band))
  72. pa_mode_adj = 0x04000000;
  73. else
  74. pa_mode_adj = 0;
  75. mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
  76. mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
  77. }
  78. mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
  79. mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
  80. mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
  81. mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
  82. if (mt76x2_ext_pa_enabled(dev, band)) {
  83. u32 val;
  84. if (band == NL80211_BAND_2GHZ)
  85. val = 0x3c3c023c;
  86. else
  87. val = 0x363c023c;
  88. mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
  89. mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
  90. mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
  91. } else {
  92. if (band == NL80211_BAND_2GHZ) {
  93. u32 val = 0x0f3c3c3c;
  94. mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
  95. mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
  96. mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
  97. } else {
  98. mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
  99. mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
  100. mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
  101. }
  102. }
  103. }
  104. EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
  105. static void
  106. mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
  107. {
  108. int i;
  109. for (i = 0; i < sizeof(r->all); i++)
  110. if (r->all[i] > limit)
  111. r->all[i] = limit;
  112. }
  113. static u32
  114. mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
  115. {
  116. u32 val = 0;
  117. val |= (v1 & (BIT(6) - 1)) << 0;
  118. val |= (v2 & (BIT(6) - 1)) << 8;
  119. val |= (v3 & (BIT(6) - 1)) << 16;
  120. val |= (v4 & (BIT(6) - 1)) << 24;
  121. return val;
  122. }
  123. static void
  124. mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
  125. {
  126. int i;
  127. for (i = 0; i < sizeof(r->all); i++)
  128. r->all[i] += offset;
  129. }
  130. static int
  131. mt76x2_get_min_rate_power(struct mt76_rate_power *r)
  132. {
  133. int i;
  134. s8 ret = 0;
  135. for (i = 0; i < sizeof(r->all); i++) {
  136. if (!r->all[i])
  137. continue;
  138. if (ret)
  139. ret = min(ret, r->all[i]);
  140. else
  141. ret = r->all[i];
  142. }
  143. return ret;
  144. }
/* Recompute and program tx power for the current channel: combine the
 * EEPROM per-chain/per-rate power data with the configured power limit,
 * clamp the per-chain init power into hardware range, and write the
 * per-rate power offset table.
 */
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
{
	enum nl80211_chan_width width = dev->mt76.chandef.width;
	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
	struct mt76x2_tx_power_info txp;
	int txp_0, txp_1, delta = 0;
	struct mt76_rate_power t = {};
	int base_power, gain;

	mt76x2_get_power_info(dev, &txp, chan);

	/* Wider channels get an extra EEPROM-provided power delta. */
	if (width == NL80211_CHAN_WIDTH_40)
		delta = txp.delta_bw40;
	else if (width == NL80211_CHAN_WIDTH_80)
		delta = txp.delta_bw80;

	/* Build the absolute per-rate power table, then cap it at the
	 * configured limit; remember the resulting maximum.
	 */
	mt76x2_get_rate_power(dev, &t, chan);
	mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
	mt76x2_limit_rate_power(&t, dev->txpower_conf);
	dev->txpower_cur = mt76x2_get_max_rate_power(&t);

	/* base_power is the lowest (non-zero) rate power; fold the
	 * difference from the chain-0 target into the bandwidth delta.
	 */
	base_power = mt76x2_get_min_rate_power(&t);
	delta += base_power - txp.chain[0].target_power;
	txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
	txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;

	/* Clamp the per-chain init power into [0, 0x2f]; any excess is
	 * pushed back into base_power so the rate table compensates.
	 */
	gain = min(txp_0, txp_1);
	if (gain < 0) {
		base_power -= gain;
		txp_0 -= gain;
		txp_1 -= gain;
	} else if (gain > 0x2f) {
		base_power -= gain - 0x2f;
		txp_0 = 0x2f;
		txp_1 = 0x2f;
	}

	/* Make the rate table relative to base_power before programming. */
	mt76x2_add_rate_power_offset(&t, -base_power);
	dev->target_power = txp.chain[0].target_power;
	dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
	dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
	dev->rate_power = t;

	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);

	/* Program the per-rate offsets, four 6-bit values per register.
	 * NOTE(review): the repeated ht/vht indices in CFG_3..CFG_9
	 * (e.g. t.vht[8], t.ht[6]) look like duplicates but presumably
	 * mirror the hardware register layout for STBC/VHT rate groups —
	 * confirm against the datasheet before "fixing".
	 */
	mt76_wr(dev, MT_TX_PWR_CFG_0,
		mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
	mt76_wr(dev, MT_TX_PWR_CFG_1,
		mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
	mt76_wr(dev, MT_TX_PWR_CFG_2,
		mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
	mt76_wr(dev, MT_TX_PWR_CFG_3,
		mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
	mt76_wr(dev, MT_TX_PWR_CFG_4,
		mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
	mt76_wr(dev, MT_TX_PWR_CFG_7,
		mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
	mt76_wr(dev, MT_TX_PWR_CFG_8,
		mt76x2_tx_power_mask(t.ht[14], 0, t.vht[8], t.vht[8]));
	mt76_wr(dev, MT_TX_PWR_CFG_9,
		mt76x2_tx_power_mask(t.ht[6], 0, t.vht[8], t.vht[8]));
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
  201. void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
  202. enum nl80211_band band, u8 bw)
  203. {
  204. u32 cfg0, cfg1;
  205. if (mt76x2_ext_pa_enabled(dev, band)) {
  206. cfg0 = bw ? 0x000b0c01 : 0x00101101;
  207. cfg1 = 0x00011414;
  208. } else {
  209. cfg0 = bw ? 0x000b0b01 : 0x00101001;
  210. cfg1 = 0x00021414;
  211. }
  212. mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
  213. mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
  214. mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
  215. }
  216. EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
  217. void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
  218. {
  219. int core_val, agc_val;
  220. switch (width) {
  221. case NL80211_CHAN_WIDTH_80:
  222. core_val = 3;
  223. agc_val = 7;
  224. break;
  225. case NL80211_CHAN_WIDTH_40:
  226. core_val = 2;
  227. agc_val = 3;
  228. break;
  229. default:
  230. core_val = 0;
  231. agc_val = 1;
  232. break;
  233. }
  234. mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
  235. mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
  236. mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
  237. mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
  238. }
  239. EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
  240. void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
  241. {
  242. switch (band) {
  243. case NL80211_BAND_2GHZ:
  244. mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
  245. mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
  246. break;
  247. case NL80211_BAND_5GHZ:
  248. mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
  249. mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
  250. break;
  251. }
  252. mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
  253. primary_upper);
  254. }
  255. EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
/* Return the minimum averaged RSSI across all associated stations,
 * or -75 if no station reported a usable (negative) value.
 */
int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
{
	struct mt76x2_sta *sta;
	struct mt76_wcid *wcid;
	int i, j, min_rssi = 0;
	s8 cur_rssi;

	local_bh_disable();
	rcu_read_lock();

	/* Walk the WCID allocation bitmap; each set bit marks an
	 * allocated station entry in dev->wcid[].
	 */
	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
		unsigned long mask = dev->wcid_mask[i];

		if (!mask)
			continue;

		/* j tracks the absolute WCID index while the mask is
		 * shifted right one bit per iteration.
		 */
		for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
			if (!(mask & 1))
				continue;

			wcid = rcu_dereference(dev->wcid[j]);
			if (!wcid)
				continue;

			sta = container_of(wcid, struct mt76x2_sta, wcid);

			/* rx_lock guards the EWMA state against the rx
			 * path. Stations that have gone 5+ polls without
			 * activity (inactive_count, presumably reset on
			 * rx — verify) are ignored by reporting 0.
			 */
			spin_lock(&dev->mt76.rx_lock);
			if (sta->inactive_count++ < 5)
				cur_rssi = ewma_signal_read(&sta->rssi);
			else
				cur_rssi = 0;
			spin_unlock(&dev->mt76.rx_lock);

			if (cur_rssi < min_rssi)
				min_rssi = cur_rssi;
		}
	}

	rcu_read_unlock();
	local_bh_enable();

	/* No station contributed a negative RSSI: fall back to -75 dBm. */
	if (!min_rssi)
		return -75;

	return min_rssi;
}
EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);