htt_rx.c

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
        struct ath10k_skb_rxcb *rxcb;

        hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
                if (rxcb->paddr == paddr)
                        return ATH10K_RXCB_SKB(rxcb);

        WARN_ON_ONCE(1);
        return NULL;
}
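
/* Illustrative sketch (not part of the driver): buffers handed to the
 * firmware are tracked by their DMA address, so an in-order indication
 * that carries only a physical address can be mapped back to its skb.
 * Roughly, pairing the fill path with the completion path:
 *
 *	paddr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
 *	ATH10K_SKB_RXCB(skb)->paddr = paddr;
 *	hash_add(htt->rx_ring.skb_table,
 *		 &ATH10K_SKB_RXCB(skb)->hlist, paddr);	// on refill
 *	...
 *	skb = ath10k_htt_rx_find_skb_paddr(ar, paddr);	// on completion
 */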
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_rxcb *rxcb;
        struct hlist_node *n;
        int i;

        if (htt->rx_ring.in_ord_rx) {
                hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
                        skb = ATH10K_RXCB_SKB(rxcb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        hash_del(&rxcb->hlist);
                        dev_kfree_skb_any(skb);
                }
        } else {
                for (i = 0; i < htt->rx_ring.size; i++) {
                        skb = htt->rx_ring.netbufs_ring[i];
                        if (!skb)
                                continue;

                        rxcb = ATH10K_SKB_RXCB(skb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }

        htt->rx_ring.fill_cnt = 0;
        hash_init(htt->rx_ring.skb_table);
        memset(htt->rx_ring.netbufs_ring, 0,
               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
        return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
        return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
                                             void *vaddr)
{
        htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
                                             void *vaddr)
{
        htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
                                          dma_addr_t paddr, int idx)
{
        htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
                                          dma_addr_t paddr, int idx)
{
        htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
        htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
        htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
        return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
        return (void *)htt->rx_ring.paddrs_ring_64;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        /* The Full Rx Reorder firmware has no way of telling the host
         * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
         * To keep things simple, make sure the ring is always half empty.
         * This guarantees that no replenishment overruns are possible.
         */
        BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

        idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        if (idx < 0 || idx >= htt->rx_ring.size) {
                ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
                idx &= htt->rx_ring.size_mask;
                ret = -ENOMEM;
                goto fail;
        }

        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                ath10k_htt_set_paddrs_ring(htt, paddr, idx);
                htt->rx_ring.fill_cnt++;

                if (htt->rx_ring.in_ord_rx) {
                        hash_add(htt->rx_ring.skb_table,
                                 &ATH10K_SKB_RXCB(skb)->hlist,
                                 paddr);
                }

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        /*
         * Make sure the rx buffer is updated before the available buffer
         * index to avoid any potential rx ring corruption (an ordering
         * sketch follows this function).
         */
        mb();
        *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
        return ret;
}
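
/* Illustrative sketch (not part of the driver) of the publish ordering
 * above: the producer must make the ring entry globally visible before
 * the index that advertises it, otherwise the consumer (here, the
 * firmware/DMA engine) could chase a stale physical address:
 *
 *	ring[idx] = paddr;		// fill the slot first
 *	mb();				// order slot write before index write
 *	*alloc_idx = idx + 1;		// then publish the new index
 *
 * With HTT_RX_RING_SIZE a power of 2 (say 2048, mask 0x7ff) the
 * wrap-around is a single AND: (2047 + 1) & 0x7ff == 0.
 */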
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up a significant amount of CPU cycles and
         * starve other tasks, e.g. TX on an ethernet device while acting
         * as a bridge with the ath10k wlan interface. This ended up with
         * very poor performance once the host CPU was overwhelmed with RX
         * on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact tasklets are
         * processed in FIFO order. This means actual RX processing can
         * starve out refilling. If there aren't enough buffers on the RX
         * ring the FW will not report RX until it is refilled with enough
         * buffers. This automatically balances load wrt CPU power (a
         * worked example follows this function).
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability.
         */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}
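
/* Worked example for the progressive refill above (hypothetical values,
 * assuming ATH10K_HTT_MAX_NUM_REFILL is 100): with fill_level = 1000 and
 * fill_cnt = 850:
 *
 *	num_deficit = 1000 - 850 = 150
 *	num_to_fill = min(100, 150) = 100	// filled this pass
 *	num_deficit = 150 - 100 = 50		// left over
 *
 * Since num_deficit > 0, the retry timer is armed with the short
 * HTT_RX_RING_REFILL_RESCHED_MS period to top up the remaining 50
 * buffers later instead of looping here and hogging the CPU.
 */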
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
        struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
        struct ath10k_htt *htt = &ar->htt;
        int ret;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));
        if (ret)
                ath10k_htt_rx_ring_free(htt);
        spin_unlock_bh(&htt->rx_ring.lock);

        return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);

        skb_queue_purge(&htt->rx_msdus_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
        skb_queue_purge(&htt->tx_fetch_ind_q);

        spin_lock_bh(&htt->rx_ring.lock);
        ath10k_htt_rx_ring_free(htt);
        spin_unlock_bh(&htt->rx_ring.lock);

        dma_free_coherent(htt->ar->dev,
                          ath10k_htt_get_rx_ring_size(htt),
                          ath10k_htt_get_vaddr_ring(htt),
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
        ath10k_htt_reset_paddrs_ring(htt, idx);

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev,
                         ATH10K_SKB_RXCB(msdu)->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   struct sk_buff_head *amsdu)
{
        struct ath10k *ar = htt->ar;
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        for (;;) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                msdu = ath10k_htt_rx_netbuf_pop(htt);
                if (!msdu) {
                        __skb_queue_purge(amsdu);
                        return -ENOENT;
                }

                __skb_queue_tail(amsdu, msdu);

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 * expects now
                 */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                      & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        __skb_queue_purge(amsdu);
                        return -EIO;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                      & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                         RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* Note: Chained buffers do not contain rx descriptor
                 * (see the worked example after this function).
                 */
                while (msdu_chained--) {
                        msdu = ath10k_htt_rx_netbuf_pop(htt);
                        if (!msdu) {
                                __skb_queue_purge(amsdu);
                                return -ENOENT;
                        }

                        __skb_queue_tail(amsdu, msdu);
                        skb_trim(msdu, 0);
                        skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= msdu->len;
                        msdu_chaining = 1;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
                            RX_MSDU_END_INFO0_LAST_MSDU;

                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
                                         sizeof(*rx_desc) - sizeof(u32));

                if (last_msdu)
                        break;
        }

        if (skb_queue_empty(amsdu))
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */
        return msdu_chaining;
}
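
/* Worked example for the chaining above (sizes are for illustration
 * only, assuming HTT_RX_MSDU_SIZE = 1560 and HTT_RX_BUF_SIZE = 1904):
 * an MSDU reported as 4000 bytes with ring2_more_count = 2 is
 * reassembled as
 *
 *	head buffer:	min(4000, 1560) = 1560 bytes, 2440 remain
 *	chained buf 1:	min(2440, 1904) = 1904 bytes,  536 remain
 *	chained buf 2:	min(536, 1904)  =  536 bytes,    0 remain
 *
 * Only the head buffer carries an rx descriptor; the chained buffers
 * are raw payload, and the caller sees msdu_chaining = 1.
 */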
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
                                               u64 paddr)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
        if (!msdu)
                return NULL;

        rxcb = ATH10K_SKB_RXCB(msdu);
        hash_del(&rxcb->hlist);
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
                                          struct htt_rx_in_ord_ind *ev,
                                          struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
        struct htt_rx_desc *rxd;
        struct sk_buff *msdu;
        int msdu_count;
        bool is_offload;
        u32 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = (void *)msdu->data;

                        trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

                        skb_put(msdu, sizeof(*rxd));
                        skb_pull(msdu, sizeof(*rxd));
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd->attention.flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
                                          struct htt_rx_in_ord_ind *ev,
                                          struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
        struct htt_rx_desc *rxd;
        struct sk_buff *msdu;
        int msdu_count;
        bool is_offload;
        u64 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = (void *)msdu->data;

                        trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

                        skb_put(msdu, sizeof(*rxd));
                        skb_pull(msdu, sizeof(*rxd));
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd->attention.flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
        void *vaddr, *vaddr_ring;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_confused = false;

        /* XXX: The fill level could be changed during runtime in response to
         * the host processing latency. Is this really worth it?
         */
        htt->rx_ring.size = HTT_RX_RING_SIZE;
        htt->rx_ring.size_mask = htt->rx_ring.size - 1;
        htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.netbufs_ring =
                kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        size = ath10k_htt_get_rx_ring_size(htt);

        vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
        if (!vaddr_ring)
                goto err_dma_ring;

        ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);

        skb_queue_head_init(&htt->rx_msdus_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);
        skb_queue_head_init(&htt->tx_fetch_ind_q);
        atomic_set(&htt->num_mpdus_ready, 0);

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          ath10k_htt_get_rx_ring_size(htt),
                          vaddr_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}
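
/* Illustrative note (not part of the driver): the error labels above
 * unwind in reverse order of allocation, the usual kernel goto-ladder
 * idiom. A hypothetical extra allocation after the index buffer would
 * slot in as:
 *
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);	// hypothetical
 *	if (!foo)
 *		goto err_dma_idx;	// frees idx buf, ring, netbufs
 *
 * so each label frees exactly what was live when its goto could fire.
 */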
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
                                          enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
                return IEEE80211_CCMP_256_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
                return IEEE80211_GCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
                                        enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
                return IEEE80211_CCMP_256_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
                return IEEE80211_GCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
                                        enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}
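
/* Worked example for the three helpers above: for CCMP-128
 * (HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) the per-MPDU crypto overhead is
 *
 *	param (head) = IEEE80211_CCMP_HDR_LEN = 8 bytes
 *	MIC (tail)   = IEEE80211_CCMP_MIC_LEN = 8 bytes
 *	ICV (tail)   = 0 bytes	(CCMP has no WEP/TKIP-style ICV)
 *
 * whereas TKIP reports an 8-byte IV/param, a 4-byte ICV and no MIC here
 * (the Michael MIC is accounted for separately via MICHAEL_MIC_LEN).
 */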
struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
        u8 ret = 0;

        switch (bw) {
        case 0:
                ret = RATE_INFO_BW_20;
                break;
        case 1:
                ret = RATE_INFO_BW_40;
                break;
        case 2:
                ret = RATE_INFO_BW_80;
                break;
        case 3:
                ret = RATE_INFO_BW_160;
                break;
        }

        return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
{
        struct ieee80211_supported_band *sband;
        u8 cck, rate, bw, sgi, mcs, nss;
        u8 preamble = 0;
        u8 group_id;
        u32 info1, info2, info3;
        u32 stbc, nsts_su;

        info1 = __le32_to_cpu(rxd->ppdu_start.info1);
        info2 = __le32_to_cpu(rxd->ppdu_start.info2);
        info3 = __le32_to_cpu(rxd->ppdu_start.info3);

        preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                /* To get the legacy rate index the band is required. Since
                 * the band can't be undefined, check if freq is non-zero.
                 */
                if (!status->freq)
                        return;

                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
                rate &= ~RX_PPDU_START_RATE_FLAG;

                sband = &ar->mac.sbands[status->band];
                status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info2 and info3 */
                mcs = info2 & 0x1F;
                nss = mcs >> 3;
                bw = (info2 >> 7) & 1;
                sgi = (info3 >> 7) & 1;

                status->rate_idx = mcs;
                status->encoding = RX_ENC_HT;
                if (sgi)
                        status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
                if (bw)
                        status->bw = RATE_INFO_BW_40;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
                 * TODO check this
                 */
                bw = info2 & 3;
                sgi = info3 & 1;
                stbc = (info2 >> 3) & 1;
                group_id = (info2 >> 4) & 0x3F;

                if (GROUP_ID_IS_SU_MIMO(group_id)) {
                        mcs = (info3 >> 4) & 0x0F;
                        nsts_su = ((info2 >> 10) & 0x07);
                        if (stbc)
                                nss = (nsts_su >> 2) + 1;
                        else
                                nss = (nsts_su + 1);
                } else {
                        /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
                         * so it's impossible to decode MCS. Also since
                         * firmware consumes Group Id Management frames host
                         * has no knowledge regarding group/user position
                         * mapping so it's impossible to pick the correct Nsts
                         * from VHT-SIG-A1.
                         *
                         * Bandwidth and SGI are valid so report the rateinfo
                         * on a best-effort basis.
                         */
                        mcs = 0;
                        nss = 1;
                }

                if (mcs > 0x09) {
                        ath10k_warn(ar, "invalid MCS received %u\n", mcs);
                        ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
                                    __le32_to_cpu(rxd->attention.flags),
                                    __le32_to_cpu(rxd->mpdu_start.info0),
                                    __le32_to_cpu(rxd->mpdu_start.info1),
                                    __le32_to_cpu(rxd->msdu_start.common.info0),
                                    __le32_to_cpu(rxd->msdu_start.common.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info0),
                                    __le32_to_cpu(rxd->ppdu_start.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info2),
                                    __le32_to_cpu(rxd->ppdu_start.info3),
                                    __le32_to_cpu(rxd->ppdu_start.info4));
                        ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
                                    __le32_to_cpu(rxd->msdu_end.common.info0),
                                    __le32_to_cpu(rxd->mpdu_end.info0));

                        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
                                        "rx desc msdu payload: ",
                                        rxd->msdu_payload, 50);
                }

                status->rate_idx = mcs;
                status->nss = nss;

                if (sgi)
                        status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

                status->bw = ath10k_bw_to_mac80211_bw(bw);
                status->encoding = RX_ENC_VHT;
                break;
        default:
                break;
        }
}
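
/* Worked example for the VHT branch above (hypothetical field values):
 * an SU PPDU with stbc = 0 and VHT-SIG-A1 NSTS bits nsts_su = 1 yields
 * nss = nsts_su + 1 = 2 spatial streams. The MCS comes from VHT-SIG-A2
 * bits [7:4] of info3, and anything above MCS 9 triggers the
 * diagnostic descriptor dump above.
 */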
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;
        u16 peer_id;

        lockdep_assert_held(&ar->data_lock);

        if (!rxd)
                return NULL;

        if (rxd->attention.flags &
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;

        if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;

        peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_PEER_IDX);

        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer)
                return NULL;

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (WARN_ON_ONCE(!arvif))
                return NULL;

        if (ath10k_mac_vif_chan(arvif->vif, &def))
                return NULL;

        return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_id == vdev_id &&
                    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
                        return def.chan;
        }

        return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
                              struct ieee80211_chanctx_conf *conf,
                              void *data)
{
        struct cfg80211_chan_def *def = data;

        *def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
        struct cfg80211_chan_def def = {};

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_htt_rx_h_any_chan_iter,
                                            &def);

        return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd,
                                    u32 vdev_id)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        if (!ch)
                ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
        if (!ch)
                ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
        if (!ch)
                ch = ath10k_htt_rx_h_any_channel(ar);
        if (!ch)
                ch = ar->tgt_oper_chan;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
                                   struct ieee80211_rx_status *status,
                                   struct htt_rx_desc *rxd)
{
        int i;

        for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
                status->chains &= ~BIT(i);

                if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
                        status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
                                rxd->ppdu_start.rssi_chains[i].pri20_mhz;

                        status->chains |= BIT(i);
                }
        }

        /* FIXME: Get real NF */
        status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                         rxd->ppdu_start.rssi_comb;
        status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
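
/* Worked example for the signal handling above: ATH10K_DEFAULT_NOISE_FLOOR
 * is -95 dBm, and the hardware reports per-chain RSSI in dB above that
 * floor (0x80 marking an unused chain). A chain reporting pri20_mhz = 40
 * is therefore published as -95 + 40 = -55 dBm, and rssi_comb yields the
 * combined status->signal the same way.
 */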
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd)
{
        /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
         * means all prior MSDUs in a PPDU are reported to mac80211 without
         * the TSF. Is it worth holding frames until the end of PPDU is known?
         *
         * FIXME: Can we get/compute 64bit TSF?
         */
        status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
        status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status,
                                 u32 vdev_id)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        bool is_first_ppdu;
        bool is_last_ppdu;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_first_ppdu = !!(rxd->attention.flags &
                           __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
        is_last_ppdu = !!(rxd->attention.flags &
                          __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

        if (is_first_ppdu) {
                /* New PPDU starts so clear out the old per-PPDU status. */
                status->freq = 0;
                status->rate_idx = 0;
                status->nss = 0;
                status->encoding = RX_ENC_LEGACY;
                status->bw = RATE_INFO_BW_20;

                status->flag &= ~RX_FLAG_MACTIME_END;
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
                status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
                status->ampdu_reference = ar->ampdu_reference;

                ath10k_htt_rx_h_signal(ar, status, rxd);
                ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }

        if (is_last_ppdu) {
                ath10k_htt_rx_h_mactime(ar, status, rxd);

                /* set ampdu last segment flag */
                status->flag |= RX_FLAG_AMPDU_IS_LAST;
                ar->ampdu_reference++;
        }
}

static const char * const tid_to_ac[] = {
        "BE",
        "BK",
        "BK",
        "BE",
        "VI",
        "VI",
        "VO",
        "VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
        u8 *qc;
        int tid;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return "";

        qc = ieee80211_get_qos_ctl(hdr);
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        if (tid < 8)
                snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
        else
                snprintf(out, size, "tid %d", tid);

        return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
                                       struct ieee80211_rx_status *rx_status,
                                       struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        char tid[32];

        status = IEEE80211_SKB_RXCB(skb);

        ath10k_dbg(ar, ATH10K_DBG_DATA,
                   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
                   ath10k_get_tid(hdr, tid, sizeof(tid)),
                   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
                                                        "mcast" : "ucast",
                   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
                   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
                   (status->encoding == RX_ENC_HT) ? "ht" : "",
                   (status->encoding == RX_ENC_VHT) ? "vht" : "",
                   (status->bw == RATE_INFO_BW_40) ? "40" : "",
                   (status->bw == RATE_INFO_BW_80) ? "80" : "",
                   (status->bw == RATE_INFO_BW_160) ? "160" : "",
                   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR),
                   !!(status->flag & RX_FLAG_AMSDU_MORE));
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);
        trace_ath10k_rx_hdr(ar, skb->data, skb->len);
        trace_ath10k_rx_payload(ar, skb->data, skb->len);

        ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
                                      struct ieee80211_hdr *hdr)
{
        int len = ieee80211_hdrlen(hdr->frame_control);

        if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
                      ar->running_fw->fw_file.fw_features))
                len = round_up(len, 4);

        return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        enum htt_rx_mpdu_encrypt_type enctype,
                                        bool is_decrypted)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        size_t crypto_len;
        bool is_first;
        bool is_last;

        rxd = (void *)msdu->data - sizeof(*rxd);
        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

        /* Delivered decapped frame:
         * [802.11 header]
         * [crypto param] <-- can be trimmed if !fcs_err &&
         *                    !decrypt_err && !peer_idx_invalid
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         * [payload]
         * [FCS] <-- at end, needs to be trimmed
         */

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!is_first)))
                return;

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
                return;

        skb_trim(msdu, msdu->len - FCS_LEN);

        /* In most cases this will be true for sniffed frames. It makes sense
         * to deliver them as-is without stripping the crypto param. This is
         * necessary for software based decryption.
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
         */
        if (!is_decrypted)
                return;

        /* The payload is decrypted so strip crypto params. Start from the
         * tail since the header is still needed to compute the lengths.
         */
        hdr = (void *)msdu->data;

        /* Tail */
        if (status->flag & RX_FLAG_IV_STRIPPED) {
                skb_trim(msdu, msdu->len -
                         ath10k_htt_rx_crypto_mic_len(ar, enctype));

                skb_trim(msdu, msdu->len -
                         ath10k_htt_rx_crypto_icv_len(ar, enctype));
        } else {
                /* MIC */
                if (status->flag & RX_FLAG_MIC_STRIPPED)
                        skb_trim(msdu, msdu->len -
                                 ath10k_htt_rx_crypto_mic_len(ar, enctype));

                /* ICV */
                if (status->flag & RX_FLAG_ICV_STRIPPED)
                        skb_trim(msdu, msdu->len -
                                 ath10k_htt_rx_crypto_icv_len(ar, enctype));
        }

        /* MMIC */
        if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

        /* Head */
        if (status->flag & RX_FLAG_IV_STRIPPED) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                memmove((void *)msdu->data + crypto_len,
                        (void *)msdu->data, hdr_len);
                skb_pull(msdu, crypto_len);
        }
}
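
/* Worked example for the raw undecap above: a decrypted CCMP-128 frame
 * with RX_FLAG_IV_STRIPPED set and a 26-byte 802.11 QoS header is
 * trimmed as follows (lengths from the crypto helpers):
 *
 *	skb_trim(msdu, len - 8);	// drop 8-byte CCMP MIC from tail
 *	skb_trim(msdu, len - 0);	// no ICV for CCMP
 *	memmove(data + 8, data, 26);	// slide header over the 8-byte IV
 *	skb_pull(msdu, 8);		// drop the now-duplicated front
 *
 * leaving [802.11 hdr][llc/payload] with all crypto bytes removed.
 */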
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          struct ieee80211_rx_status *status,
                                          const u8 first_hdr[64],
                                          enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
        int l3_pad_bytes;
        int bytes_aligned = ar->hw_params.decap_align_bytes;

        /* Delivered decapped frame:
         * [nwifi 802.11 header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         *
         * Note: The nwifi header doesn't have QoS Control and is
         * (always?) a 3addr frame.
         *
         * Note2: There's no A-MSDU subframe header, even if it's part
         * of an A-MSDU.
         */

        /* pull decapped header and copy SA & DA */
        rxd = (void *)msdu->data - sizeof(*rxd);

        l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
        skb_put(msdu, l3_pad_bytes);

        hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

        hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
        ether_addr_copy(sa, ieee80211_get_SA(hdr));
        skb_pull(msdu, hdr_len);

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);

        if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
                memcpy(skb_push(msdu,
                                ath10k_htt_rx_crypto_param_len(ar, enctype)),
                       (void *)hdr + round_up(hdr_len, bytes_aligned),
                       ath10k_htt_rx_crypto_param_len(ar, enctype));
        }

        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len, crypto_len;
        void *rfc1042;
        bool is_first, is_last, is_amsdu;
        int bytes_aligned = ar->hw_params.decap_align_bytes;

        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;

        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);

        rfc1042 = hdr;

        if (is_first) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                rfc1042 += round_up(hdr_len, bytes_aligned) +
                           round_up(crypto_len, bytes_aligned);
        }

        if (is_amsdu)
                rfc1042 += sizeof(struct amsdu_subframe_hdr);

        return rfc1042;
}
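
/* Worked example for the rfc1042 lookup above (hypothetical frame):
 * with a 26-byte QoS header, CCMP (8-byte param), 4-byte decap
 * alignment and the first subframe of an A-MSDU, the LLC/SNAP header
 * sits at
 *
 *	rfc1042 = rx_hdr_status
 *		+ round_up(26, 4)	// 28, aligned 802.11 header
 *		+ round_up(8, 4)	// 8, aligned crypto param
 *		+ 14;			// sizeof(struct amsdu_subframe_hdr)
 *
 * i.e. 50 bytes into the raw header status area of the rx descriptor.
 */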
  1112. static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
  1113. struct sk_buff *msdu,
  1114. struct ieee80211_rx_status *status,
  1115. const u8 first_hdr[64],
  1116. enum htt_rx_mpdu_encrypt_type enctype)
  1117. {
  1118. struct ieee80211_hdr *hdr;
  1119. struct ethhdr *eth;
  1120. size_t hdr_len;
  1121. void *rfc1042;
  1122. u8 da[ETH_ALEN];
  1123. u8 sa[ETH_ALEN];
  1124. int l3_pad_bytes;
  1125. struct htt_rx_desc *rxd;
  1126. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1127. /* Delivered decapped frame:
  1128. * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
  1129. * [payload]
  1130. */
  1131. rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
  1132. if (WARN_ON_ONCE(!rfc1042))
  1133. return;
  1134. rxd = (void *)msdu->data - sizeof(*rxd);
  1135. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1136. skb_put(msdu, l3_pad_bytes);
  1137. skb_pull(msdu, l3_pad_bytes);
  1138. /* pull decapped header and copy SA & DA */
  1139. eth = (struct ethhdr *)msdu->data;
  1140. ether_addr_copy(da, eth->h_dest);
  1141. ether_addr_copy(sa, eth->h_source);
  1142. skb_pull(msdu, sizeof(struct ethhdr));
  1143. /* push rfc1042/llc/snap */
  1144. memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
  1145. sizeof(struct rfc1042_hdr));
  1146. /* push original 802.11 header */
  1147. hdr = (struct ieee80211_hdr *)first_hdr;
  1148. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1149. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1150. memcpy(skb_push(msdu,
  1151. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1152. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1153. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1154. }
  1155. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1156. /* original 802.11 header has a different DA and in
  1157. * case of 4addr it may also have different SA
  1158. */
  1159. hdr = (struct ieee80211_hdr *)msdu->data;
  1160. ether_addr_copy(ieee80211_get_DA(hdr), da);
  1161. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  1162. }
  1163. static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
  1164. struct sk_buff *msdu,
  1165. struct ieee80211_rx_status *status,
  1166. const u8 first_hdr[64],
  1167. enum htt_rx_mpdu_encrypt_type enctype)
  1168. {
  1169. struct ieee80211_hdr *hdr;
  1170. size_t hdr_len;
  1171. int l3_pad_bytes;
  1172. struct htt_rx_desc *rxd;
  1173. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1174. /* Delivered decapped frame:
  1175. * [amsdu header] <-- replaced with 802.11 hdr
  1176. * [rfc1042/llc]
  1177. * [payload]
  1178. */
  1179. rxd = (void *)msdu->data - sizeof(*rxd);
  1180. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1181. skb_put(msdu, l3_pad_bytes);
  1182. skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
  1183. hdr = (struct ieee80211_hdr *)first_hdr;
  1184. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1185. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1186. memcpy(skb_push(msdu,
  1187. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1188. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1189. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1190. }
  1191. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1192. }
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
					    enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

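/* Map the hardware's per-MSDU checksum verification onto the sk_buff
 * checksum state: CHECKSUM_UNNECESSARY only when the frame is IPv4/IPv6
 * TCP or UDP and both the IP and TCP/UDP checksums passed verification,
 * CHECKSUM_NONE otherwise so the stack re-verifies in software.
 */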
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

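/* Process one MPDU, delivered as an A-MSDU skb list: decide whether the
 * hardware decrypted it, translate rx attention flags into mac80211 rx
 * flags and driver error codes, then undecap every MSDU against the
 * first MSDU's original 802.11 header.
 */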
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;
		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

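/* Hand the MSDUs of one A-MSDU over to mac80211. RX_FLAG_AMSDU_MORE is
 * set on all but the last subframe, and RX_FLAG_ALLOW_SAME_PN on all but
 * the first, since subframes of the same MPDU share a packet number.
 */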
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

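/* Coalesce a chained MSDU (one frame split across several rx buffers)
 * into the first skb. Returns 0 on success or -1 if the head skb could
 * not be expanded, in which case the chain is put back together so the
 * caller can free it as a whole.
 */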
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: This could be optimized by using skb_try_coalesce or a
	 * similar method to decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}

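/* Decide whether an A-MSDU may be delivered at all: a channel must be
 * known and nothing is reported while CAC (channel availability check)
 * is running.
 */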
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}

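/* Pop one MPDU off the rx ring and run it through the full rx path:
 * PPDU status, unchaining, filtering, MPDU processing and finally
 * delivery to mac80211, updating per-TID rx statistics along the way.
 */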
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* A positive return value indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}

static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the
		 * same HTC service so it should be safe to use kfifo_put
		 * w/o lock.
		 *
		 * From kfifo_put() documentation:
		 * Note that with only one concurrent reader and one concurrent
		 * writer, you don't need extra locking to use these macros.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

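/* Firmware offloads BA session negotiation; translate the HTT addba
 * event into a mac80211 rx BA session start for the given peer and TID.
 */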
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

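/* Move MSDUs from the head of @list into @amsdu up to and including the
 * one marked LAST_MSDU. Returns -EAGAIN (and restores @list) when the
 * A-MSDU is still incomplete.
 */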
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

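/* Handle an in-order rx indication: pop the referenced buffers off the
 * rx ring in the order given by the event, deliver offloaded frames
 * directly and split the remainder into A-MSDUs for regular processing.
 */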
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	if (ar->hw_params.target_64bit)
		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
						     &list);
	else
		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
						     &list);

	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
					     NULL);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return ret;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

static inline bool is_valid_legacy_rate(u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return true;
	}

	return false;
}

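/* Decode the firmware rate code (preamble, NSS, MCS, bandwidth, guard
 * interval) into a mac80211 struct rate_info and cache it as the
 * station's last tx rate.
 */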
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	u8 rate = 0, sgi;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);

		if (!is_valid_legacy_rate(rate)) {
			ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
				    rate);
			return;
		}

		/* This is hacky, FW sends CCK rate 5.5 Mbps as 6 */
		rate *= 10;
		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = rate - 5;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
}

static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

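/* Dispatch a target-to-host HTT message. Returns true if the caller may
 * free the skb afterwards, false if ownership was transferred (e.g. an
 * in-order rx indication queued for processing in NAPI context).
 */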
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

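/* Deliver up to (budget - quota) MSDUs from the rx completion queue to
 * mac80211 and return the updated quota.
 */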
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}

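/* NAPI poll body: deliver pending rx MSDUs, process queued in-order
 * indications and ready MPDUs, reap tx completions and tx fetch
 * indications. Returns the number of rx packets processed, or the full
 * budget when NAPI should be rescheduled.
 */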
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

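/* Rx ring accessors for targets with 32-bit vs 64-bit rx ring addressing. */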
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}