/*
 * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
 *
 * Flow rings are transmit traffic (=propagating towards antenna) related entities
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_flowring.c 699841 2017-05-16 16:47:06Z $
 */
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <dngl_stats.h>
#include <dhd.h>

#include <dhd_flowring.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <802.1d.h>
#include <pcie_core.h>
#include <bcmmsgbuf.h>
#include <dhd_pcie.h>

static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);

static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da);

static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da);

static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid);

int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);

#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))

const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
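
/*
 * prio2ac maps 802.1D user priorities to WMM access categories
 * (AC_BE=0, AC_BK=1, AC_VI=2, AC_VO=3), so e.g. priorities 6 and 7
 * (VO/NC) both land on AC_VO, collapsing 8 priorities onto 4 rings.
 * prio2tid is the identity map used when flow rings are keyed per TID.
 * A minimal sketch of how a packet priority is resolved under the
 * currently selected map (illustrative only, not part of the driver):
 *
 *     uint8 prio = PKTPRIO(pkt);                  // 0..7, 802.1D priority
 *     uint8 ring_key = dhdp->flow_prio_map[prio];
 *     // AC map:  prio 5 (VI) -> 2 (AC_VI);  TID map: prio 5 -> 5
 */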
/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
static INLINE int
dhd_flow_queue_throttle(flow_queue_t *queue)
{
    return DHD_FLOW_QUEUE_FULL(queue);
}

int BCMFASTPATH
dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
{
    return BCME_NORESOURCE;
}

/** Returns flow ring given a flowid */
flow_ring_node_t *
dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t * flow_ring_node;

    ASSERT(dhdp != (dhd_pub_t*)NULL);
    ASSERT(flowid < dhdp->num_flow_rings);
    if (flowid >= dhdp->num_flow_rings) {
        return NULL;
    }

    flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);

    ASSERT(flow_ring_node->flowid == flowid);
    return flow_ring_node;
}

/** Returns 'backup' queue given a flowid */
flow_queue_t *
dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t * flow_ring_node = NULL;

    flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
    if (flow_ring_node)
        return &flow_ring_node->queue;
    else
        return NULL;
}

/* Flow ring's queue management functions */

/** Reinitialize a flow ring's queue. */
void
dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    queue->head = queue->tail = NULL;
    queue->len = 0;

    /* Set queue's threshold and queue's parent cumulative length counter */
    ASSERT(max > 1);
    DHD_FLOW_QUEUE_SET_MAX(queue, max);
    DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
    DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
    DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);

    queue->failures = 0U;
    queue->cb = &dhd_flow_queue_overflow;
}

/** Initialize a flow ring's queue, called on driver initialization. */
void
dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    dll_init(&queue->list);
    dhd_flow_queue_reinit(dhdp, queue, max);
}

/** Register an enqueue overflow callback handler */
void
dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
{
    ASSERT(queue != NULL);
    queue->cb = cb;
}

/**
 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
 * to the flow ring itself.
 */
int BCMFASTPATH
dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
    int ret = BCME_OK;

    ASSERT(queue != NULL);

    if (dhd_flow_queue_throttle(queue)) {
        queue->failures++;
        ret = (*queue->cb)(queue, pkt);
        goto done;
    }

    if (queue->head) {
        FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
    } else {
        queue->head = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);

    queue->tail = pkt; /* at tail */

    queue->len++;

    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

done:
    return ret;
}
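
/*
 * Usage sketch (illustrative, not part of the driver): a producer that
 * enqueues a packet and treats the overflow callback's verdict as the
 * enqueue result. With the default callback registered above, a full
 * queue yields BCME_NORESOURCE and the caller owns the packet:
 *
 *     flow_queue_t *q = dhd_flow_queue(dhdp, flowid);
 *     if (q != NULL && dhd_flow_queue_enqueue(dhdp, q, pkt) != BCME_OK) {
 *         PKTFREE(dhdp->osh, pkt, TRUE);   // not queued: drop it
 *     }
 */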
/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
void * BCMFASTPATH
dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
{
    void * pkt;

    ASSERT(queue != NULL);

    pkt = queue->head; /* from head */

    if (pkt == NULL) {
        ASSERT((queue->len == 0) && (queue->tail == NULL));
        goto done;
    }

    queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
    if (queue->head == NULL)
        queue->tail = NULL;

    queue->len--;

    /* decrement parent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* decrement grandparent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */

done:
    return pkt;
}

/** Reinsert a dequeued 802.3 packet back at the head */
void BCMFASTPATH
dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
    if (queue->head == NULL) {
        queue->tail = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
    queue->head = pkt;
    queue->len++;

    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
}
/** Fetch the backup queue for a flowring, and assign flow control thresholds */
void
dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
    int queue_budget, int cumm_threshold, void *cumm_ctr,
    int l2cumm_threshold, void *l2cumm_ctr)
{
    flow_queue_t * queue = NULL;

    ASSERT(dhdp != (dhd_pub_t*)NULL);
    ASSERT(queue_budget > 1);
    ASSERT(cumm_threshold > 1);
    ASSERT(cumm_ctr != (void*)NULL);
    ASSERT(l2cumm_threshold > 1);
    ASSERT(l2cumm_ctr != (void*)NULL);

    queue = dhd_flow_queue(dhdp, flowid);
    if (queue) {
        DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */

        /* Set the queue's parent threshold and cumulative counter */
        DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
        DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);

        /* Set the queue's grandparent threshold and cumulative counter */
        DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
        DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
    }
}
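
/*
 * Example call (illustrative values; 'if_cumm_ctr' is a hypothetical
 * per-interface counter owned by the caller): give flowid 5 a per-queue
 * budget of 256 packets, throttled when its parent exceeds 512 queued
 * packets or the device-wide total exceeds 1024.
 *
 *     dhd_flow_ring_config_thresholds(dhdp, 5, 256,
 *         512, &if_cumm_ctr, 1024, &dhdp->l2cumm_ctr);
 */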
uint8
dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
{
    uint8 prio_count = 0;
    int i;
    // Pick all elements one by one
    for (i = 0; i < NUMPRIO; i++)
    {
        // Check if the picked element is already counted
        int j;
        for (j = 0; j < i; j++) {
            if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
                break;
            }
        }
        // If not counted earlier, then count it
        if (i == j) {
            prio_count++;
        }
    }

#ifdef DHD_LOSSLESS_ROAMING
    /* For LLR, we use a flowring with prio 7, which is not accounted for
     * in the prio2ac array. But in __dhd_sendpkt, the prio is hardcoded
     * to PRIO_8021D_NC and passed on to dhd_flowid_update.
     * So add 1 to prio_count.
     */
    prio_count++;
#endif /* DHD_LOSSLESS_ROAMING */

    return prio_count;
}
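
/*
 * Worked example: with the default AC map, flow_prio_map = prio2ac =
 * { 0, 1, 1, 0, 2, 2, 3, 3 }, so the distinct values are {0, 1, 2, 3}
 * and this returns 4 (5 when DHD_LOSSLESS_ROAMING adds the prio 7 ring).
 * With the TID map all 8 values are distinct and it returns 8.
 */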
uint8
dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
{
    uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
    uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings;
    uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
    return max_multi_client_flow_rings;
}
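
/*
 * Example arithmetic (hypothetical sizes): with num_flow_rings = 40,
 * max_cmn_rings = 5 and the default AC map (4 distinct priorities),
 * 35 TX flow rings remain, of which 35 - 4 = 31 may be consumed by
 * multi-client (e.g. AP) interfaces.
 */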
/** Initializes data structures of multiple flow rings */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
{
    uint32 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz = 0;
    void * flowid_allocator;
    flow_ring_table_t *flow_ring_table = NULL;
    if_flow_lkup_t *if_flow_lkup = NULL;
    void *lock = NULL;
    void *list_lock = NULL;
    unsigned long flags;

    DHD_INFO(("%s\n", __FUNCTION__));

    /* Construct a 16bit flowid allocator */
    flowid_allocator = id16_map_init(dhdp->osh,
        num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
    if (flowid_allocator == NULL) {
        DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
        return BCME_NOMEM;
    }

    /* Allocate a flow ring table, comprising the requested number of rings */
    flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
    flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
    if (flow_ring_table == NULL) {
        DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize flow ring table state */
    DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
    DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
    bzero((uchar *)flow_ring_table, flow_ring_table_sz);
    for (idx = 0; idx < num_flow_rings; idx++) {
        flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
        flow_ring_table[idx].flowid = (uint16)idx;
        flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
#ifdef IDLE_TX_FLOW_MGMT
        flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
        if (flow_ring_table[idx].lock == NULL) {
            DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
            goto fail;
        }

        dll_init(&flow_ring_table[idx].list);

        /* Initialize the per flow ring backup queue */
        dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
            FLOW_RING_QUEUE_THRESHOLD);
    }

    /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
    if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
    if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
        DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
    if (if_flow_lkup == NULL) {
        DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize per interface hash table */
    for (idx = 0; idx < DHD_MAX_IFS; idx++) {
        int hash_ix;
        if_flow_lkup[idx].status = 0;
        if_flow_lkup[idx].role = 0;
        for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
            if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
    }

    lock = dhd_os_spin_lock_init(dhdp->osh);
    if (lock == NULL)
        goto fail;

    list_lock = dhd_os_spin_lock_init(dhdp->osh);
    if (list_lock == NULL)
        goto lock_fail;

    dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
    bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
    dhdp->multi_client_flow_rings = 0U;

#ifdef DHD_LOSSLESS_ROAMING
    dhdp->dequeue_prec_map = ALLPRIO;
#endif /* DHD_LOSSLESS_ROAMING */

    /* Now populate into dhd pub */
    DHD_FLOWID_LOCK(lock, flags);
    dhdp->num_flow_rings = num_flow_rings;
    dhdp->flowid_allocator = (void *)flowid_allocator;
    dhdp->flow_ring_table = (void *)flow_ring_table;
    dhdp->if_flow_lkup = (void *)if_flow_lkup;
    dhdp->flowid_lock = lock;
    dhdp->flow_rings_inited = TRUE;
    dhdp->flowring_list_lock = list_lock;
    DHD_FLOWID_UNLOCK(lock, flags);

    DHD_INFO(("%s done\n", __FUNCTION__));
    return BCME_OK;

lock_fail:
    /* deinit the spinlock */
    dhd_os_spin_lock_deinit(dhdp->osh, lock);

fail:
    /* Destruct the per interface flow lkup table */
    if (if_flow_lkup != NULL) {
        DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
    }
    if (flow_ring_table != NULL) {
        for (idx = 0; idx < num_flow_rings; idx++) {
            if (flow_ring_table[idx].lock != NULL)
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
        }
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }
    id16_map_fini(dhdp->osh, flowid_allocator);

    return BCME_NOMEM;
}
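
/*
 * Note on sizing (a reading of the code above, not an authoritative spec):
 * the id16 allocator hands out flowids for TX rings only, so it is sized
 * num_flow_rings - max_cmn_rings and starts at FLOWID_RESERVED, leaving
 * the low ids for the common (non-TX) rings. The flow_ring_table itself
 * is still indexed by absolute flowid and is therefore allocated at
 * full num_flow_rings size.
 */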
/** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
    uint16 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    void *lock;

    DHD_INFO(("dhd_flow_rings_deinit\n"));

    if (!(dhdp->flow_rings_inited)) {
        DHD_ERROR(("dhd_flow_rings not initialized!\n"));
        return;
    }

    if (dhdp->flow_ring_table != NULL) {

        ASSERT(dhdp->num_flow_rings > 0);

        DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
        flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
        dhdp->flow_ring_table = NULL;
        DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
        for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
            if (flow_ring_table[idx].active) {
                dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
            }
            ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));

            /* Deinit flow ring queue locks before destroying flow ring table */
            if (flow_ring_table[idx].lock != NULL) {
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
            }
            flow_ring_table[idx].lock = NULL;
        }

        /* Destruct the flow ring table */
        flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);

    /* Destruct the per interface flow lkup table */
    if (dhdp->if_flow_lkup != NULL) {
        if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
        bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
        DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
        dhdp->if_flow_lkup = NULL;
    }

    /* Destruct the flowid allocator */
    if (dhdp->flowid_allocator != NULL)
        dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);

    dhdp->num_flow_rings = 0U;
    bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings = 0U;
    dhdp->multi_client_flow_rings = 0U;

    lock = dhdp->flowid_lock;
    dhdp->flowid_lock = NULL;

    if (lock) {
        DHD_FLOWID_UNLOCK(lock, flags);
        dhd_os_spin_lock_deinit(dhdp->osh, lock);
    }

    dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
    dhdp->flowring_list_lock = NULL;

    ASSERT(dhdp->if_flow_lkup == NULL);
    ASSERT(dhdp->flowid_allocator == NULL);
    ASSERT(dhdp->flow_ring_table == NULL);
    dhdp->flow_rings_inited = FALSE;
}
/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
uint8
dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
{
    if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
    ASSERT(if_flow_lkup);
    return if_flow_lkup[ifindex].role;
}

#ifdef WLTDLS
bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
{
    unsigned long flags;
    tdls_peer_node_t *cur = NULL;

    DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
    cur = dhdp->peer_tbl.node;

    while (cur != NULL) {
        if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
            DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
            return TRUE;
        }
        cur = cur->next;
    }
    DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
    return FALSE;
}
#endif /* WLTDLS */
/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
static INLINE uint16
dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    int hash;
    bool ismcast = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return FLOWID_INVALID;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    ASSERT(if_flow_lkup);

    if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
#ifdef WLTDLS
        if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
            is_tdls_destination(dhdp, da)) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            while (cur != NULL) {
                if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
                    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                    return cur->flowid;
                }
                cur = cur->next;
            }
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return FLOWID_INVALID;
        }
#endif /* WLTDLS */
        /* For STA non-TDLS destinations and WDS destinations the flow ring id
         * is mapped based on prio only
         */
        cur = if_flow_lkup[ifindex].fl_hash[prio];
        if (cur) {
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return cur->flowid;
        }
    } else {

        if (ETHER_ISMULTI(da)) {
            ismcast = TRUE;
            hash = 0;
        } else {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
        }

        cur = if_flow_lkup[ifindex].fl_hash[hash];

        while (cur) {
            if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
                (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
                (cur->flow_info.tid == prio))) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return cur->flowid;
            }
            cur = cur->next;
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
    return FLOWID_INVALID;
} /* dhd_flowid_find */
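
/*
 * Keying summary (as implemented above): for a generic STA role the hash
 * table is indexed by priority alone (one ring per prio, any DA), except
 * for TDLS peers, which hash on (DA, prio). For AP-like roles, unicast
 * flows hash on (DA, prio) via DHD_FLOWRING_HASHINDEX and must also match
 * the stored tid, while all multicast/broadcast traffic of an interface
 * shares bucket 0.
 */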
/** Create unique Flow ID, called when a flow ring is created. */
static INLINE uint16
dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    flow_hash_info_t *fl_hash_node, *cur;
    if_flow_lkup_t *if_flow_lkup;
    int hash;
    uint16 flowid;
    unsigned long flags;

    fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
    if (fl_hash_node == NULL) {
        DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__));
        return FLOWID_INVALID;
    }
    memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    ASSERT(dhdp->flowid_allocator != NULL);
    flowid = id16_map_alloc(dhdp->flowid_allocator);
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    if (flowid == FLOWID_INVALID) {
        MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
        DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__));
        return FLOWID_INVALID;
    }

    fl_hash_node->flowid = flowid;
    fl_hash_node->flow_info.tid = prio;
    fl_hash_node->flow_info.ifindex = ifindex;
    fl_hash_node->next = NULL;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
        /* For STA/GC non-TDLS destinations and WDS destinations we allocate
         * the entry based on prio only
         */
#ifdef WLTDLS
        if (dhdp->peer_tbl.tdls_peer_count &&
            (is_tdls_destination(dhdp, da))) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            if (cur) {
                while (cur->next) {
                    cur = cur->next;
                }
                cur->next = fl_hash_node;
            } else {
                if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
            }
        } else
#endif /* WLTDLS */
            if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
    } else {

        /* For bcast/mcast assign the first slot in the interface */
        hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
        cur = if_flow_lkup[ifindex].fl_hash[hash];
        if (cur) {
            while (cur->next) {
                cur = cur->next;
            }
            cur->next = fl_hash_node;
        } else
            if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));

    if (fl_hash_node->flowid >= dhdp->num_flow_rings) {
        DHD_ERROR(("%s: flowid=%d num_flow_rings=%d ifindex=%d prio=%d role=%d\n",
            __FUNCTION__, fl_hash_node->flowid, dhdp->num_flow_rings,
            ifindex, prio, if_flow_lkup[ifindex].role));
        dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
        dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
        return FLOWID_INVALID;
    }

    return fl_hash_node->flowid;
} /* dhd_flowid_alloc */
/** Get flow ring ID, if not present try to create one */
static INLINE int
dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid)
{
    uint16 id;
    flow_ring_node_t *flow_ring_node;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    int ret;

    DHD_TRACE(("%s\n", __FUNCTION__));

    if (!dhdp->flow_ring_table) {
        return BCME_ERROR;
    }

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return BCME_BADARG;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);

    if (id == FLOWID_INVALID) {
        bool if_role_multi_client;
        if_flow_lkup_t *if_flow_lkup;
        if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

        if (!if_flow_lkup[ifindex].status)
            return BCME_ERROR;

        /* check role for multi client case */
        if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

        /* Abort flowring creation if multi client flowrings crossed the threshold */
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
        if (if_role_multi_client &&
            (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
            DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
                __FUNCTION__, dhdp->multi_client_flow_rings,
                dhdp->max_multi_client_flow_rings));
            return BCME_ERROR;
        }
#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */

        /* Do not create a flowring if the peer is not associated */
#if defined(PCIE_FULL_DONGLE)
        if (if_role_multi_client && !ETHER_ISMULTI(da) &&
            !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
            DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
            return BCME_ERROR;
        }
#endif /* PCIE_FULL_DONGLE */

        id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
        if (id == FLOWID_INVALID) {
            DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
                __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
            return BCME_ERROR;
        }

        ASSERT(id < dhdp->num_flow_rings);

        /* Only after flowid alloc, increment multi_client_flow_rings */
        if (if_role_multi_client) {
            dhdp->multi_client_flow_rings++;
        }

        /* register this flowid in dhd_pub */
        dhd_add_flowid(dhdp, ifindex, prio, da, id);

        flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];

        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /* Init Flow info */
        memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
        memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
        flow_ring_node->flow_info.tid = prio;
        flow_ring_node->flow_info.ifindex = ifindex;
        flow_ring_node->active = TRUE;
        flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;

#ifdef TX_STATUS_LATENCY_STATS
        flow_ring_node->flow_info.num_tx_status = 0;
        flow_ring_node->flow_info.cum_tx_status_latency = 0;
        flow_ring_node->flow_info.num_tx_pkts = 0;
#endif /* TX_STATUS_LATENCY_STATS */
        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Create and inform device about the new flow */
        if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
            != BCME_OK) {
            DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
            flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
            flow_ring_node->active = FALSE;
            DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
            DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
            return BCME_ERROR;
        }

        *flowid = id;
        return BCME_OK;
    } else {
        /* the flow id was found in the hash */

        if (id >= dhdp->num_flow_rings) {
            DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n",
                __FUNCTION__, id, dhdp->num_flow_rings));
            *flowid = FLOWID_INVALID;
            ASSERT(0);
            return BCME_ERROR;
        }

        flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /*
         * If the flow_ring_node is in the Open state or a status-pending state,
         * we can return the flow id to the caller. If the flow_ring_node is in
         * FLOW_RING_STATUS_CREATE_PENDING, creation is still in progress and
         * hence the packets should be queued.
         *
         * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
         * FLOW_RING_STATUS_CLOSED, we should return an error.
         * Note that when a flow ring is being deleted we mark it as
         * FLOW_RING_STATUS_DELETE_PENDING. Before the dongle responds, and
         * before we mark it as FLOW_RING_STATUS_CLOSED, we could still get tx
         * packets; we should drop the packets in that case.
         * The decision to return OK should NOT be based on the 'active' variable,
         * because 'active' is set TRUE when a flow_ring_node gets allocated and
         * FALSE when the flow ring gets removed, and so does not reflect the
         * true state of the flow ring.
         * If IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
         * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED,
         * the flowid is to be returned, and from dhd_bus_txdata the flowring is
         * resumed again. The status FLOW_RING_STATUS_RESUME_PENDING is
         * equivalent to FLOW_RING_STATUS_CREATE_PENDING.
         */
        if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
            flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
            *flowid = FLOWID_INVALID;
            ret = BCME_ERROR;
        } else {
            *flowid = id;
            ret = BCME_OK;
        }

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
        return ret;
    } /* Flow Id found in the hash */
} /* dhd_flowid_lookup */
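
/*
 * Condensed view of the status handling above for an existing flowid
 * (a summary of the code, not an independent spec):
 *
 *   OPEN / CREATE_PENDING (and, with IDLE_TX_FLOW_MGMT,
 *   SUSPENDED / RESUME_PENDING)     -> return flowid, caller may queue
 *   DELETE_PENDING / CLOSED         -> FLOWID_INVALID, caller drops packet
 */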
int
dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
    int hashidx = 0;
    bool found = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    if (!dhdp->flow_ring_table) {
        DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
    for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
        cur = if_flow_lkup[ifindex].fl_hash[hashidx];
        if (cur) {
            if (cur->flowid == flowid) {
                found = TRUE;
            }

            while (!found && cur) {
                if (cur->flowid == flowid) {
                    found = TRUE;
                    break;
                }
                cur = cur->next;
            }
            if (found) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return BCME_OK;
            }
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    return BCME_ERROR;
}
int
dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid)
{
    return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
}

/**
 * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
 * select the flowring to send the packet to the dongle.
 */
int BCMFASTPATH
dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
{
    uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
    struct ether_header *eh = (struct ether_header *)pktdata;
    uint16 flowid = 0;

    ASSERT(ifindex < DHD_MAX_IFS);

    if (ifindex >= DHD_MAX_IFS) {
        return BCME_BADARG;
    }

    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
        &flowid) != BCME_OK) {
        return BCME_ERROR;
    }

    DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));

    /* Tag the packet with flowid */
    DHD_PKT_SET_FLOWID(pktbuf, flowid);
    return BCME_OK;
}
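
/*
 * TX path sketch (illustrative, simplified from __dhd_sendpkt-style
 * callers; the exact caller code is outside this file): tag the packet,
 * then hand it to the bus layer, which enqueues it on the tagged ring.
 *
 *     if (dhd_flowid_update(dhdp, ifidx, pktprio, pktbuf) != BCME_OK) {
 *         PKTFREE(dhdp->osh, pktbuf, TRUE);    // no usable ring: drop
 *         return BCME_ERROR;
 *     }
 *     ret = dhd_bus_txdata(dhdp->bus, pktbuf, ifidx);
 */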
void
dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
    int hashix;
    bool found = FALSE;
    flow_hash_info_t *cur, *prev;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;
    bool if_role_multi_client;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

    for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {

        cur = if_flow_lkup[ifindex].fl_hash[hashix];

        if (cur) {
            if (cur->flowid == flowid) {
                found = TRUE;
            }

            prev = NULL;
            while (!found && cur) {
                if (cur->flowid == flowid) {
                    found = TRUE;
                    break;
                }
                prev = cur;
                cur = cur->next;
            }
            if (found) {
                if (!prev) {
                    if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
                } else {
                    prev->next = cur->next;
                }

                /* Decrement multi_client_flow_rings */
                if (if_role_multi_client) {
                    dhdp->multi_client_flow_rings--;
                }

                /* deregister flowid from dhd_pub. */
                dhd_del_flowid(dhdp, ifindex, flowid);

                id16_map_free(dhdp->flowid_allocator, flowid);
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));

                return;
            }
        }
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
    DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
        __FUNCTION__, flowid));
} /* dhd_flowid_free */
/**
 * Delete all flow rings associated with the given interface. Is called when e.g. the dongle
 * indicates that a wireless link has gone down.
 */
void
dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_flow_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                (void *) &flow_ring_table[id]);
        }
    }
}

void
dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;
    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    for (id = 0; id < dhdp->num_flow_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_flush_request(dhdp->bus,
                (void *) &flow_ring_table[id]);
        }
    }
}

/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
void
dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_flow_rings; id++) {
        /*
         * Send the flowring delete request even if the flowring status is
         * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where the DISASSOC_IND
         * event comes ahead of the flowring create response.
         * Otherwise the flowring will not be deleted later, as there will not be
         * any further DISASSOC_IND event. With this change, when the create
         * response event comes to DHD, it will change the status to
         * FLOW_RING_STATUS_OPEN and soon the delete response event will come,
         * upon which DHD will delete the flowring.
         */
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
            ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
            (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
            DHD_ERROR(("%s: deleting flowid %d\n",
                __FUNCTION__, flow_ring_table[id].flowid));
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                (void *) &flow_ring_table[id]);
        }
    }
}
/** Handles interface ADD, CHANGE, DEL indications from the dongle */
void
dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 op, uint8 role)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    DHD_INFO(("%s: ifindex %u op %u role is %u \n",
        __FUNCTION__, ifindex, op, role));
    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
        if_flow_lkup[ifindex].role = role;

        if (role == WLC_E_IF_ROLE_WDS) {
            /**
             * The WDS role does not send a WLC_E_LINK event after the interface
             * is up. So to create flowrings for WDS, set the status to TRUE in
             * WLC_E_IF itself; the same applies when setting the status to FALSE.
             * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that
             * all interfaces are handled uniformly.
             */
            if_flow_lkup[ifindex].status = TRUE;
            DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
                __FUNCTION__, ifindex, role));
        }
    } else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
        if_flow_lkup[ifindex].status = FALSE;
        DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
            __FUNCTION__, ifindex, role));
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
}
/** Handles a STA 'link' indication from the dongle */
int
dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return BCME_BADARG;

    DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (status) {
        if_flow_lkup[ifindex].status = TRUE;
    } else {
        if_flow_lkup[ifindex].status = FALSE;
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    return BCME_OK;
}
/** Update flow priority mapping, called on IOVAR */
int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
{
    uint16 flowid;
    flow_ring_node_t *flow_ring_node;

    if (map > DHD_FLOW_PRIO_LLR_MAP)
        return BCME_BADOPTION;

    /* Check if we need to change prio map */
    if (map == dhdp->flow_prio_map_type)
        return BCME_OK;

    /* If any ring is active we cannot change priority mapping for flow rings */
    for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
        flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
        if (flow_ring_node->active)
            return BCME_EPERM;
    }

    /* Inform firmware about new mapping type */
    if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
        return BCME_ERROR;

    /* update internal structures */
    dhdp->flow_prio_map_type = map;
    if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
        bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
    else
        bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);

    return BCME_OK;
}
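
/*
 * Example (illustrative): switching an idle device from the default AC
 * map to per-TID rings, e.g. from an IOVAR handler:
 *
 *     int err = dhd_update_flow_prio_map(dhdp, DHD_FLOW_PRIO_TID_MAP);
 *     // fails with BCME_EPERM while any flow ring is still active
 */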
/** Inform firmware on updated flow priority mapping, called on IOVAR */
int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
{
    uint8 iovbuf[24];
    int len;

    if (!set) {
        memset(&iovbuf, 0, sizeof(iovbuf));
        len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
        if (len == 0) {
            return BCME_BUFTOOSHORT;
        }
        if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
            DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
            return BCME_ERROR;
        }
        *map = iovbuf[0];
        return BCME_OK;
    }

    len = bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
    if (len == 0) {
        return BCME_BUFTOOSHORT;
    }
    if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
        DHD_ERROR(("%s: failed to set fl_prio_map \n",
            __FUNCTION__));
        return BCME_ERROR;
    }
    return BCME_OK;
}