/* hnd_pktpool.c */
/*
 * HND generic packet pool operation primitives
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: hnd_pktpool.c 677681 2017-01-04 09:10:30Z $
 */
  31. #include <typedefs.h>
  32. #include <osl.h>
  33. #include <osl_ext.h>
  34. #include <bcmutils.h>
  35. #include <hnd_pktpool.h>
  36. #ifdef BCMRESVFRAGPOOL
  37. #include <hnd_resvpool.h>
  38. #endif /* BCMRESVFRAGPOOL */
  39. #ifdef BCMFRWDPOOLREORG
  40. #include <hnd_poolreorg.h>
  41. #endif /* BCMFRWDPOOLREORG */
/* Mutex macros for thread-safe operation; they compile to no-ops
 * (OSL_EXT_SUCCESS) when HND_PKTPOOL_THREAD_SAFE is not defined.
 */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	osl_ext_mutex_release(mutex)
#else
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	OSL_EXT_SUCCESS
#endif

/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID	(0U)
#define PKTPOOLREG_RSVD_PTR	(POOLPTR(0xdeaddead))
#define PKTPOOLREG_FREE_PTR	(POOLPTR(NULL))

#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))

/* Tag a registry entry as free for use */
#define PKTPOOL_REGISTRY_CLR(id) \
	PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
#define PKTPOOL_REGISTRY_ISCLR(id) \
	(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))

/* Tag registry entry 0 as reserved */
#define PKTPOOL_REGISTRY_RSV() \
	PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
#define PKTPOOL_REGISTRY_ISRSVD() \
	(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))

/* Walk all un-reserved entries in registry (iterates 1..pktpools_max) */
#define PKTPOOL_REGISTRY_FOREACH(id) \
	for ((id) = 1U; (id) <= pktpools_max; (id)++)

/* Gating state for the "packets became available" empty callback */
enum pktpool_empty_cb_state {
	EMPTYCB_ENABLED = 0,	/* Enable callback when new packets are added to pool */
	EMPTYCB_DISABLED,	/* Disable callback when new packets are added to pool */
	EMPTYCB_SKIPPED		/* Packet was added to pool when callback was disabled */
};

uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */

/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);

/** forward declaration */
static void pktpool_avail_notify(pktpool_t *pktp);
/** accessor functions required when ROMming this file, forced into RAM */

/** Returns the pool pointer stored at registry slot 'id' (no bounds check) */
pktpool_t *
BCMRAMFN(get_pktpools_registry)(int id)
{
	return pktpools_registry[id];
}
/** Stores pool pointer 'pp' into registry slot 'id' (no bounds check) */
static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
	pktpools_registry[id] = pp;
}
/** TRUE iff registry slot 'id' currently holds exactly the pointer 'pp' */
static bool
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
	return pktpools_registry[id] == pp;
}
  101. /** Constructs a pool registry to serve a maximum of total_pools */
  102. int
  103. pktpool_attach(osl_t *osh, uint32 total_pools)
  104. {
  105. uint32 poolid;
  106. BCM_REFERENCE(osh);
  107. if (pktpools_max != 0U) {
  108. return BCME_ERROR;
  109. }
  110. ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
  111. /* Initialize registry: reserve slot#0 and tag others as free */
  112. PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */
  113. PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
  114. PKTPOOL_REGISTRY_CLR(poolid);
  115. }
  116. pktpools_max = total_pools;
  117. return (int)pktpools_max;
  118. }
  119. /** Destructs the pool registry. Ascertain all pools were first de-inited */
  120. int
  121. pktpool_dettach(osl_t *osh)
  122. {
  123. uint32 poolid;
  124. BCM_REFERENCE(osh);
  125. if (pktpools_max == 0U) {
  126. return BCME_OK;
  127. }
  128. /* Ascertain that no pools are still registered */
  129. ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
  130. PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */
  131. ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
  132. }
  133. pktpools_max = 0U; /* restore boot state */
  134. return BCME_OK;
  135. }
  136. /** Registers a pool in a free slot; returns the registry slot index */
  137. static int
  138. pktpool_register(pktpool_t * poolptr)
  139. {
  140. uint32 poolid;
  141. if (pktpools_max == 0U) {
  142. return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
  143. }
  144. ASSERT(pktpools_max != 0U);
  145. /* find an empty slot in pktpools_registry */
  146. PKTPOOL_REGISTRY_FOREACH(poolid) {
  147. if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
  148. PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
  149. return (int)poolid; /* return pool ID */
  150. }
  151. } /* FOREACH */
  152. return PKTPOOL_INVALID_ID; /* error: registry is full */
  153. }
  154. /** Deregisters a pktpool, given the pool pointer; tag slot as free */
  155. static int
  156. pktpool_deregister(pktpool_t * poolptr)
  157. {
  158. uint32 poolid;
  159. ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
  160. poolid = POOLID(poolptr);
  161. ASSERT(poolid <= pktpools_max);
  162. /* Asertain that a previously registered poolptr is being de-registered */
  163. if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
  164. PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
  165. } else {
  166. ASSERT(0);
  167. return BCME_ERROR; /* mismatch in registry */
  168. }
  169. return BCME_OK;
  170. }
  171. /**
  172. * pktpool_init:
  173. * User provides a pktpool_t structure and specifies the number of packets to
  174. * be pre-filled into the pool (n_pkts).
  175. * pktpool_init first attempts to register the pool and fetch a unique poolid.
  176. * If registration fails, it is considered an BCME_ERR, caused by either the
  177. * registry was not pre-created (pktpool_attach) or the registry is full.
  178. * If registration succeeds, then the requested number of packets will be filled
  179. * into the pool as part of initialization. In the event that there is no
  180. * available memory to service the request, then BCME_NOMEM will be returned
  181. * along with the count of how many packets were successfully allocated.
  182. * In dongle builds, prior to memory reclaimation, one should limit the number
  183. * of packets to be allocated during pktpool_init and fill the pool up after
  184. * reclaim stage.
  185. *
  186. * @param n_pkts Number of packets to be pre-filled into the pool
  187. * @param max_pkt_bytes The size of all packets in a pool must be the same. E.g. PKTBUFSZ.
  188. * @param type e.g. 'lbuf_frag'
  189. */
  190. int
  191. pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx,
  192. uint8 type)
  193. {
  194. int i, err = BCME_OK;
  195. int pktplen;
  196. uint8 pktp_id;
  197. ASSERT(pktp != NULL);
  198. ASSERT(osh != NULL);
  199. ASSERT(n_pkts != NULL);
  200. pktplen = *n_pkts;
  201. bzero(pktp, sizeof(pktpool_t));
  202. /* assign a unique pktpool id */
  203. if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
  204. return BCME_ERROR;
  205. }
  206. POOLSETID(pktp, pktp_id);
  207. pktp->inited = TRUE;
  208. pktp->istx = istx ? TRUE : FALSE;
  209. pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
  210. pktp->type = type;
  211. if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
  212. return BCME_ERROR;
  213. }
  214. pktp->maxlen = PKTPOOL_LEN_MAX;
  215. pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
  216. for (i = 0; i < pktplen; i++) {
  217. void *p;
  218. p = PKTGET(osh, max_pkt_bytes, TRUE);
  219. if (p == NULL) {
  220. /* Not able to allocate all requested pkts
  221. * so just return what was actually allocated
  222. * We can add to the pool later
  223. */
  224. if (pktp->freelist == NULL) /* pktpool free list is empty */
  225. err = BCME_NOMEM;
  226. goto exit;
  227. }
  228. PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
  229. PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
  230. pktp->freelist = p;
  231. pktp->avail++;
  232. #ifdef BCMDBG_POOL
  233. pktp->dbg_q[pktp->dbg_qlen++].p = p;
  234. #endif // endif
  235. }
  236. exit:
  237. pktp->n_pkts = pktp->avail;
  238. *n_pkts = pktp->n_pkts; /* number of packets managed by pool */
  239. return err;
  240. } /* pktpool_init */
/**
 * pktpool_deinit:
 * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
 * An assert is in place to ensure that there are no packets still lingering
 * around. Packets freed to a pool after the deinit will cause a memory
 * corruption as the pktpool_t structure no longer exists.
 */
int
pktpool_deinit(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
	{
		int i;
		/* NOTE(review): '<=' clears n_pkts+1 dbg_q entries; looks like an
		 * off-by-one unless dbg_q is deliberately sized n_pkts+1 -- confirm.
		 */
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif

	/* Drain the free list: untag each packet and return it to the heap */
	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */
		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0); /* every managed packet must have been on the list */

	pktp->n_pkts -= freed;

	pktpool_deregister(pktp); /* release previously acquired unique pool id */
	POOLSETID(pktp, PKTPOOL_INVALID_ID);

	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->inited = FALSE;

	/* Are there still pending pkts? */
	ASSERT(pktp->n_pkts == 0);

	return 0;
}
  284. int
  285. pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
  286. {
  287. void *p;
  288. int err = 0;
  289. int n_pkts, psize, maxlen;
  290. /* protect shared resource */
  291. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  292. return BCME_ERROR;
  293. ASSERT(pktp->max_pkt_bytes != 0);
  294. maxlen = pktp->maxlen;
  295. psize = minimal ? (maxlen >> 2) : maxlen;
  296. for (n_pkts = (int)pktp->n_pkts; n_pkts < psize; n_pkts++) {
  297. p = PKTGET(osh, pktp->n_pkts, TRUE);
  298. if (p == NULL) {
  299. err = BCME_NOMEM;
  300. break;
  301. }
  302. if (pktpool_add(pktp, p) != BCME_OK) {
  303. PKTFREE(osh, p, FALSE);
  304. err = BCME_ERROR;
  305. break;
  306. }
  307. }
  308. /* protect shared resource */
  309. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  310. return BCME_ERROR;
  311. if (pktp->cbcnt) {
  312. if (pktp->empty == FALSE)
  313. pktpool_avail_notify(pktp);
  314. }
  315. return err;
  316. }
#ifdef BCMPOOLRECLAIM
/* New API to decrease the pkts from pool, but not deinit.
 * Frees up to free_cnt packets from the free list back to the heap. For rx
 * pools, a registered callback is first given the chance to release the host
 * address / Rx completion ID bound to each packet; if that callback fails the
 * packet is put back on the free list and reclaim stops early.
 * @return number of packets actually freed.
 */
uint16
pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt)
{
	uint16 freed = 0;

	pktpool_cb_extn_t cb = NULL;
	void *arg = NULL;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return freed;
	}

	/* cannot free more packets than are currently available */
	if (pktp->avail < free_cnt) {
		free_cnt = pktp->avail;
	}

	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
		/* If pool is shared rx frag pool, use call back fn to reclaim host address
		 * and Rx cpl ID associated with the pkt.
		 */
		ASSERT(pktp->cbext.cb != NULL);
		cb = pktp->cbext.cb;
		arg = pktp->cbext.arg;
	} else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
		/* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
		 * associated with the pkt.
		 */
		cb = pktp->rxcplidfn.cb;
		arg = pktp->rxcplidfn.arg;
	}

	while ((pktp->freelist != NULL) && (free_cnt)) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		if (cb != NULL) {
			/* callback failed: re-link the packet and stop reclaiming */
			if (cb(pktp, arg, p, REMOVE_RXCPLID)) {
				PKTSETFREELIST(p, pktp->freelist);
				pktp->freelist = p;
				break;
			}
		}

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		free_cnt--;
	}

	pktp->avail -= freed;

	pktp->n_pkts -= freed;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return freed;
	}

	return freed;
}
#endif /* #ifdef BCMPOOLRECLAIM */
/* New API to empty the pkts from pool, but not deinit
 * NOTE: caller is responsible to ensure,
 * all pkts are available in pool for free; else LEAK !
 */
int
pktpool_empty(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

#ifdef BCMDBG_POOL
	{
		int i;
		/* NOTE(review): '<=' touches n_pkts+1 dbg_q entries -- same pattern
		 * as pktpool_deinit; confirm dbg_q sizing before changing either.
		 */
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif

	/* Free every packet currently sitting on the pool free list */
	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;
	ASSERT(pktp->n_pkts == 0);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
  413. static void *
  414. pktpool_deq(pktpool_t *pktp)
  415. {
  416. void *p = NULL;
  417. if (pktp->avail == 0)
  418. return NULL;
  419. ASSERT(pktp->freelist != NULL);
  420. p = pktp->freelist; /* dequeue packet from head of pktpool free list */
  421. pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
  422. PKTSETFREELIST(p, NULL);
  423. pktp->avail--;
  424. return p;
  425. }
  426. static void
  427. pktpool_enq(pktpool_t *pktp, void *p)
  428. {
  429. ASSERT(p != NULL);
  430. PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
  431. pktp->freelist = p; /* free list points to newly inserted packet */
  432. pktp->avail++;
  433. ASSERT(pktp->avail <= pktp->n_pkts);
  434. }
  435. /** utility for registering host addr fill function called from pciedev */
  436. int
  437. /* BCMATTACHFN */
  438. (pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
  439. {
  440. ASSERT(cb != NULL);
  441. ASSERT(pktp->cbext.cb == NULL);
  442. pktp->cbext.cb = cb;
  443. pktp->cbext.arg = arg;
  444. return 0;
  445. }
  446. int
  447. pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
  448. {
  449. ASSERT(cb != NULL);
  450. if (pktp == NULL)
  451. return BCME_ERROR;
  452. ASSERT(pktp->rxcplidfn.cb == NULL);
  453. pktp->rxcplidfn.cb = cb;
  454. pktp->rxcplidfn.arg = arg;
  455. return 0;
  456. }
  457. /** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
  458. void
  459. pktpool_invoke_dmarxfill(pktpool_t *pktp)
  460. {
  461. ASSERT(pktp->dmarxfill.cb);
  462. ASSERT(pktp->dmarxfill.arg);
  463. if (pktp->dmarxfill.cb)
  464. pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
  465. }
  466. /** Registers callback functions for split rx mode */
  467. int
  468. pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
  469. {
  470. ASSERT(cb != NULL);
  471. pktp->dmarxfill.cb = cb;
  472. pktp->dmarxfill.arg = arg;
  473. return 0;
  474. }
/**
 * Registers callback functions.
 * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function.
 * Re-registering an existing (cb, arg) pair only increments its reference count.
 * @return BCME_OK, or BCME_ERROR when the callback table is full.
 */
int
pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	/* already registered? just take another reference */
	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt++;
			goto done;
		}
	}

	/* append in the next free slot, unless the table is full */
	i = pktp->cbcnt;
	if (i == PKTPOOL_CB_MAX_AVL) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->cbs[i].cb == NULL);
	pktp->cbs[i].cb = cb;
	pktp->cbs[i].arg = arg;
	pktp->cbs[i].refcnt++;
	pktp->cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}
/* No BCMATTACHFN as it is used in a non-attach function.
 * Drops one reference on the (cb, arg) pair; when the last reference is gone
 * the entry is removed and later entries are shifted down to fill the hole.
 */
int
pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i, k;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	ASSERT(cb != NULL);

	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt--;
			if (pktp->cbs[i].refcnt) {
				/* Still there are references to this callback */
				goto done;
			}
			/* Moving any more callbacks to fill the hole
			 * (after this loop 'i' indexes the now-duplicate last entry)
			 */
			for (k = i+1; k < pktp->cbcnt; i++, k++) {
				pktp->cbs[i].cb = pktp->cbs[k].cb;
				pktp->cbs[i].arg = pktp->cbs[k].arg;
				pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
			}

			/* reset the last callback */
			pktp->cbs[i].cb = NULL;
			pktp->cbs[i].arg = NULL;
			pktp->cbs[i].refcnt = 0;

			pktp->cbcnt--;
			goto done;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	return err;
}
  551. /** Registers callback functions */
  552. int
  553. pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
  554. {
  555. int err = 0;
  556. int i;
  557. /* protect shared resource */
  558. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  559. return BCME_ERROR;
  560. ASSERT(cb != NULL);
  561. i = pktp->ecbcnt;
  562. if (i == PKTPOOL_CB_MAX) {
  563. err = BCME_ERROR;
  564. goto done;
  565. }
  566. ASSERT(pktp->ecbs[i].cb == NULL);
  567. pktp->ecbs[i].cb = cb;
  568. pktp->ecbs[i].arg = arg;
  569. pktp->ecbcnt++;
  570. done:
  571. /* protect shared resource */
  572. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  573. return BCME_ERROR;
  574. return err;
  575. }
  576. /** Calls registered callback functions */
  577. static int
  578. pktpool_empty_notify(pktpool_t *pktp)
  579. {
  580. int i;
  581. pktp->empty = TRUE;
  582. for (i = 0; i < pktp->ecbcnt; i++) {
  583. ASSERT(pktp->ecbs[i].cb != NULL);
  584. pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
  585. }
  586. pktp->empty = FALSE;
  587. return 0;
  588. }
#ifdef BCMDBG_POOL
/** Registers a debug callback; structure parallels pktpool_empty_register() */
int
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb);

	i = pktp->dbg_cbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR; /* callback table is full */
		goto done;
	}

	ASSERT(pktp->dbg_cbs[i].cb == NULL);
	pktp->dbg_cbs[i].cb = cb;
	pktp->dbg_cbs[i].arg = arg;
	pktp->dbg_cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}
int pktpool_dbg_notify(pktpool_t *pktp);

/** Invokes all registered debug callbacks while holding the pool mutex */
int
pktpool_dbg_notify(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	for (i = 0; i < pktp->dbg_cbcnt; i++) {
		ASSERT(pktp->dbg_cbs[i].cb);
		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
  631. int
  632. pktpool_dbg_dump(pktpool_t *pktp)
  633. {
  634. int i;
  635. /* protect shared resource */
  636. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  637. return BCME_ERROR;
  638. printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
  639. for (i = 0; i < pktp->dbg_qlen; i++) {
  640. ASSERT(pktp->dbg_q[i].p);
  641. printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
  642. pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
  643. }
  644. /* protect shared resource */
  645. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  646. return BCME_ERROR;
  647. return 0;
  648. }
  649. int
  650. pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
  651. {
  652. int i;
  653. int state;
  654. /* protect shared resource */
  655. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  656. return BCME_ERROR;
  657. bzero(stats, sizeof(pktpool_stats_t));
  658. for (i = 0; i < pktp->dbg_qlen; i++) {
  659. ASSERT(pktp->dbg_q[i].p != NULL);
  660. state = PKTPOOLSTATE(pktp->dbg_q[i].p);
  661. switch (state) {
  662. case POOL_TXENQ:
  663. stats->enq++; break;
  664. case POOL_TXDH:
  665. stats->txdh++; break;
  666. case POOL_TXD11:
  667. stats->txd11++; break;
  668. case POOL_RXDH:
  669. stats->rxdh++; break;
  670. case POOL_RXD11:
  671. stats->rxd11++; break;
  672. case POOL_RXFILL:
  673. stats->rxfill++; break;
  674. case POOL_IDLE:
  675. stats->idle++; break;
  676. }
  677. }
  678. /* protect shared resource */
  679. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  680. return BCME_ERROR;
  681. return 0;
  682. }
/** Records the current cycle counter against packet 'p' so a later
 * pktpool_stop_trigger() can compute how long the packet was in flight.
 * Non-pool packets are ignored.
 */
int
pktpool_start_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	/* only pool-managed packets are tracked */
	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			pktp->dbg_q[i].cycles = cycles;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
int pktpool_stop_trigger(pktpool_t *pktp, void *p);

/** Computes the elapsed cycle count for packet 'p' since the matching
 * pktpool_start_trigger(), handling a single wrap of the 32-bit counter,
 * and stores it in dbg_q[].dur. Non-pool packets are ignored.
 */
int
pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			/* no start trigger was recorded for this packet */
			if (pktp->dbg_q[i].cycles == 0)
				break;

			if (cycles >= pktp->dbg_q[i].cycles)
				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
			else
				/* counter wrapped once: span to UINT32_MAX plus the rest */
				pktp->dbg_q[i].dur =
					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;

			pktp->dbg_q[i].cycles = 0; /* re-arm for the next trigger */
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
#endif /* BCMDBG_POOL */
  738. int
  739. pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
  740. {
  741. BCM_REFERENCE(osh);
  742. ASSERT(pktp);
  743. /* protect shared resource */
  744. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  745. return BCME_ERROR;
  746. pktp->availcb_excl = NULL;
  747. /* protect shared resource */
  748. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  749. return BCME_ERROR;
  750. return 0;
  751. }
  752. int
  753. pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
  754. {
  755. int i;
  756. int err;
  757. BCM_REFERENCE(osh);
  758. ASSERT(pktp);
  759. /* protect shared resource */
  760. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  761. return BCME_ERROR;
  762. ASSERT(pktp->availcb_excl == NULL);
  763. for (i = 0; i < pktp->cbcnt; i++) {
  764. if (cb == pktp->cbs[i].cb) {
  765. pktp->availcb_excl = &pktp->cbs[i];
  766. break;
  767. }
  768. }
  769. if (pktp->availcb_excl == NULL)
  770. err = BCME_ERROR;
  771. else
  772. err = 0;
  773. /* protect shared resource */
  774. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  775. return BCME_ERROR;
  776. return err;
  777. }
  778. static void
  779. pktpool_avail_notify(pktpool_t *pktp)
  780. {
  781. int i, k, idx;
  782. int avail;
  783. ASSERT(pktp);
  784. if (pktp->availcb_excl != NULL) {
  785. pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
  786. return;
  787. }
  788. k = pktp->cbcnt - 1;
  789. for (i = 0; i < pktp->cbcnt; i++) {
  790. avail = pktp->avail;
  791. if (avail) {
  792. if (pktp->cbtoggle)
  793. idx = i;
  794. else
  795. idx = k--;
  796. ASSERT(pktp->cbs[idx].cb != NULL);
  797. pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
  798. }
  799. }
  800. /* Alternate between filling from head or tail
  801. */
  802. pktp->cbtoggle ^= 1;
  803. return;
  804. }
  805. /** Gets an empty packet from the caller provided pool */
  806. void *
  807. pktpool_get(pktpool_t *pktp)
  808. {
  809. void *p;
  810. /* protect shared resource */
  811. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  812. return NULL;
  813. p = pktpool_deq(pktp);
  814. if (p == NULL) {
  815. /* Notify and try to reclaim tx pkts */
  816. if (pktp->ecbcnt)
  817. pktpool_empty_notify(pktp);
  818. p = pktpool_deq(pktp);
  819. if (p == NULL)
  820. goto done;
  821. }
  822. done:
  823. /* protect shared resource */
  824. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  825. return NULL;
  826. return p;
  827. }
/**
 * Returns packet 'p' to its pool and, depending on the callback state,
 * notifies registered clients that a packet has become available.
 * NOTE(review): returns silently (packet NOT freed) if the pool mutex cannot
 * be acquired — callers cannot detect this; confirm this is intended.
 */
void
pktpool_free(pktpool_t *pktp, void *p)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return;

	ASSERT(p != NULL);
#ifdef BCMDBG_POOL
	/* pktpool_stop_trigger(pktp, p); */
#endif // endif

	/* put the packet back on the pool's free queue */
	pktpool_enq(pktp, p);

	/**
	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
	 * If any avail callback functions are registered, send a notification
	 * that a new packet is available in the pool.
	 */
	if (pktp->cbcnt) {
		/* To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
		 * This allows to feed on burst basis as opposed to inefficient per-packet basis.
		 */
		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
			/**
			 * If the call originated from pktpool_empty_notify, the just freed packet
			 * is needed in pktpool_get.
			 * Therefore don't call pktpool_avail_notify.
			 */
			if (pktp->empty == FALSE)
				pktpool_avail_notify(pktp);
		} else {
			/**
			 * The callback is temporarily disabled, log that a packet has been freed.
			 * pktpool_emptycb_disable() fires the deferred notification on re-enable.
			 */
			pktp->emptycb_disable = EMPTYCB_SKIPPED;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return;
}
  867. /** Adds a caller provided (empty) packet to the caller provided pool */
  868. int
  869. pktpool_add(pktpool_t *pktp, void *p)
  870. {
  871. int err = 0;
  872. /* protect shared resource */
  873. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  874. return BCME_ERROR;
  875. ASSERT(p != NULL);
  876. if (pktp->n_pkts == pktp->maxlen) {
  877. err = BCME_RANGE;
  878. goto done;
  879. }
  880. /* pkts in pool have same length */
  881. ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
  882. PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
  883. pktp->n_pkts++;
  884. pktpool_enq(pktp, p);
  885. #ifdef BCMDBG_POOL
  886. pktp->dbg_q[pktp->dbg_qlen++].p = p;
  887. #endif // endif
  888. done:
  889. /* protect shared resource */
  890. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  891. return BCME_ERROR;
  892. return err;
  893. }
  894. /**
  895. * Force pktpool_setmaxlen () into RAM as it uses a constant
  896. * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
  897. */
  898. int
  899. BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
  900. {
  901. /* protect shared resource */
  902. if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
  903. return BCME_ERROR;
  904. if (maxlen > PKTPOOL_LEN_MAX)
  905. maxlen = PKTPOOL_LEN_MAX;
  906. /* if pool is already beyond maxlen, then just cap it
  907. * since we currently do not reduce the pool len
  908. * already allocated
  909. */
  910. pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;
  911. /* protect shared resource */
  912. if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
  913. return BCME_ERROR;
  914. return pktp->maxlen;
  915. }
  916. void
  917. pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
  918. {
  919. ASSERT(pktp);
  920. /**
  921. * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
  922. * If callback is going to be re-enabled, check if any packet got
  923. * freed and added back to the pool while callback was disabled.
  924. * When this is the case do the callback now, provided that callback functions
  925. * are registered and this call did not originate from pktpool_empty_notify.
  926. */
  927. if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
  928. (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
  929. pktpool_avail_notify(pktp);
  930. }
  931. /* Enable or temporarily disable callback when packet becomes available. */
  932. pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
  933. }
  934. bool
  935. pktpool_emptycb_disabled(pktpool_t *pktp)
  936. {
  937. ASSERT(pktp);
  938. return pktp->emptycb_disable != EMPTYCB_ENABLED;
  939. }
#ifdef BCMPKTPOOL
#include <hnd_lbuf.h>

/* Shared pool for regular (lbuf_basic) packets; see hnd_pktpool_init() */
pktpool_t *pktpool_shared = NULL;

#ifdef BCMFRAGPOOL
/* Shared pool for tx fragments (lbuf_frag) */
pktpool_t *pktpool_shared_lfrag = NULL;

#ifdef BCMRESVFRAGPOOL
/* Reserve fragment pool; left empty at init, filled via hnd_pktpool_refill() */
pktpool_t *pktpool_resv_lfrag = NULL;
struct resv_info *resv_pool_info = NULL;
#endif /* BCMRESVFRAGPOOL */
#endif /* BCMFRAGPOOL */

/* Shared pool for rx fragments (lbuf_rxfrag) */
pktpool_t *pktpool_shared_rxlfrag = NULL;

/* osh handle recorded by hnd_pktpool_init(); used by the hnd_pktpool_* wrappers */
static osl_t *pktpool_osh = NULL;
  952. /**
  953. * Initializes several packet pools and allocates packets within those pools.
  954. */
  955. int
  956. hnd_pktpool_init(osl_t *osh)
  957. {
  958. int err = BCME_OK;
  959. int n;
  960. /* Construct a packet pool registry before initializing packet pools */
  961. n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
  962. if (n != PKTPOOL_MAXIMUM_ID) {
  963. ASSERT(0);
  964. err = BCME_ERROR;
  965. goto error0;
  966. }
  967. pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
  968. if (pktpool_shared == NULL) {
  969. ASSERT(0);
  970. err = BCME_NOMEM;
  971. goto error1;
  972. }
  973. #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
  974. pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
  975. if (pktpool_shared_lfrag == NULL) {
  976. ASSERT(0);
  977. err = BCME_NOMEM;
  978. goto error2;
  979. }
  980. #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
  981. resv_pool_info = hnd_resv_pool_alloc(osh);
  982. if (resv_pool_info == NULL) {
  983. ASSERT(0);
  984. goto error2;
  985. }
  986. pktpool_resv_lfrag = resv_pool_info->pktp;
  987. if (pktpool_resv_lfrag == NULL) {
  988. ASSERT(0);
  989. goto error2;
  990. }
  991. #endif /* RESVFRAGPOOL */
  992. #endif /* FRAGPOOL */
  993. #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
  994. pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
  995. if (pktpool_shared_rxlfrag == NULL) {
  996. ASSERT(0);
  997. err = BCME_NOMEM;
  998. goto error3;
  999. }
  1000. #endif // endif
  1001. /*
  1002. * At this early stage, there's not enough memory to allocate all
  1003. * requested pkts in the shared pool. Need to add to the pool
  1004. * after reclaim
  1005. *
  1006. * n = NRXBUFPOST + SDPCMD_RXBUFS;
  1007. *
  1008. * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
  1009. * registry is not initialized or the registry is depleted.
  1010. *
  1011. * A BCME_NOMEM error only indicates that the requested number of packets
  1012. * were not filled into the pool.
  1013. */
  1014. n = 1;
  1015. MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
  1016. if ((err = pktpool_init(osh, pktpool_shared,
  1017. &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) {
  1018. ASSERT(0);
  1019. goto error4;
  1020. }
  1021. pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
  1022. #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
  1023. n = 1;
  1024. if ((err = pktpool_init(osh, pktpool_shared_lfrag,
  1025. &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) {
  1026. ASSERT(0);
  1027. goto error5;
  1028. }
  1029. pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
  1030. #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
  1031. n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
  1032. if (pktpool_init(osh, pktpool_resv_lfrag,
  1033. &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) {
  1034. ASSERT(0);
  1035. goto error5;
  1036. }
  1037. pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
  1038. #endif /* RESVFRAGPOOL */
  1039. #endif /* BCMFRAGPOOL */
  1040. #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
  1041. n = 1;
  1042. if ((err = pktpool_init(osh, pktpool_shared_rxlfrag,
  1043. &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) {
  1044. ASSERT(0);
  1045. goto error6;
  1046. }
  1047. pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
  1048. #endif // endif
  1049. #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
  1050. /* Attach poolreorg module */
  1051. if ((frwd_poolreorg_info = poolreorg_attach(osh,
  1052. #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
  1053. pktpool_shared_lfrag,
  1054. #else
  1055. NULL,
  1056. #endif // endif
  1057. #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
  1058. pktpool_shared_rxlfrag,
  1059. #else
  1060. NULL,
  1061. #endif // endif
  1062. pktpool_shared)) == NULL) {
  1063. ASSERT(0);
  1064. goto error7;
  1065. }
  1066. #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
  1067. pktpool_osh = osh;
  1068. MALLOC_CLEAR_NOPERSIST(osh);
  1069. return BCME_OK;
  1070. #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
  1071. /* detach poolreorg module */
  1072. poolreorg_detach(frwd_poolreorg_info);
  1073. error7:
  1074. #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
  1075. #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
  1076. pktpool_deinit(osh, pktpool_shared_rxlfrag);
  1077. error6:
  1078. #endif // endif
  1079. #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
  1080. pktpool_deinit(osh, pktpool_shared_lfrag);
  1081. error5:
  1082. #endif // endif
  1083. #if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
  1084. (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
  1085. pktpool_deinit(osh, pktpool_shared);
  1086. #endif // endif
  1087. error4:
  1088. #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
  1089. hnd_free(pktpool_shared_rxlfrag);
  1090. pktpool_shared_rxlfrag = (pktpool_t *)NULL;
  1091. error3:
  1092. #endif /* BCMRXFRAGPOOL */
  1093. #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
  1094. hnd_free(pktpool_shared_lfrag);
  1095. pktpool_shared_lfrag = (pktpool_t *)NULL;
  1096. error2:
  1097. #endif /* BCMFRAGPOOL */
  1098. hnd_free(pktpool_shared);
  1099. pktpool_shared = (pktpool_t *)NULL;
  1100. error1:
  1101. pktpool_dettach(osh);
  1102. error0:
  1103. MALLOC_CLEAR_NOPERSIST(osh);
  1104. return err;
  1105. } /* hnd_pktpool_init */
  1106. /** is called at each 'wl up' */
  1107. int
  1108. hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
  1109. {
  1110. return (pktpool_fill(pktpool_osh, pktpool, minimal));
  1111. }
/**
 * refills pktpools after reclaim, is called once.
 * Tops up the shared pool (optionally from SR memory first), then the tx/rx
 * fragment pools, and finally initializes + enables the reserve frag pool.
 */
void
hnd_pktpool_refill(bool minimal)
{
	if (POOL_ENAB(pktpool_shared)) {
#if defined(SRMEM)
		if (SRMEM_ENAB()) {
			/* fill the shared pool from SR memory up to its max length */
			int maxlen = pktpool_max_pkts(pktpool_shared);
			int n_pkts = pktpool_tot_pkts(pktpool_shared);

			for (; n_pkts < maxlen; n_pkts++) {
				void *p;
				if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
					break; /* SR memory exhausted */
				pktpool_add(pktpool_shared, p);
			}
		}
#endif /* SRMEM */
		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
	}
	/* fragpool reclaim */
#ifdef BCMFRAGPOOL
	if (POOL_ENAB(pktpool_shared_lfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
	}
#endif /* BCMFRAGPOOL */
	/* rx fragpool reclaim */
#ifdef BCMRXFRAGPOOL
	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
	}
#endif // endif
#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
	if (POOL_ENAB(pktpool_resv_lfrag)) {
		/* reserve sized for RESV_FRAG_POOL_LEN frags incl. per-lbuf overhead */
		int resv_size = (PKTFRAGSZ + LBUFFRAGSZ)*RESV_FRAG_POOL_LEN;
		hnd_resv_pool_init(resv_pool_info, resv_size);
		hnd_resv_pool_enable(resv_pool_info);
	}
#endif /* BCMRESVFRAGPOOL */
}
  1151. #endif /* BCMPKTPOOL */