/*
 * HND generic packet pool operation primitives
 *
 * Portions of this code are copyright (c) 2017 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <>
 *
 * $Id: hnd_pktpool.c 591285 2015-10-07 11:56:29Z $
 */

#include <typedefs.h>
#include <osl.h>
#include <osl_ext.h>
#include <bcmutils.h>
#include <hnd_pktpool.h>

/* Mutex macros for thread-safe operation */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	osl_ext_mutex_release(mutex)
#else
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	OSL_EXT_SUCCESS
#endif

/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID		(0U)
#define PKTPOOLREG_RSVD_PTR		(POOLPTR(0xdeaddead))
#define PKTPOOLREG_FREE_PTR		(POOLPTR(NULL))

#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))

/* Tag a registry entry as free for use */
#define PKTPOOL_REGISTRY_CLR(id)	\
	PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
#define PKTPOOL_REGISTRY_ISCLR(id)	\
	(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))

/* Tag registry entry 0 as reserved */
#define PKTPOOL_REGISTRY_RSV()		\
	PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
#define PKTPOOL_REGISTRY_ISRSVD()	\
	(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))

/* Walk all un-reserved entries in registry */
#define PKTPOOL_REGISTRY_FOREACH(id)	\
	for ((id) = 1U; (id) <= pktpools_max; (id)++)

enum pktpool_empty_cb_state {
	EMPTYCB_ENABLED = 0,	/* Enable callback when new packets are added to pool */
	EMPTYCB_DISABLED,	/* Disable callback when new packets are added to pool */
	EMPTYCB_SKIPPED		/* Packet was added to pool when callback was disabled */
};

uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */

/* Register/deregister a pktpool with the registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);

/** add declaration */
static int pktpool_avail_notify(pktpool_t *pktp);
/** accessor functions required when ROMming this file, forced into RAM */
pktpool_t *
BCMRAMFN(get_pktpools_registry)(int id)
{
	return pktpools_registry[id];
}

static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
	pktpools_registry[id] = pp;
}

static bool
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
	return pktpools_registry[id] == pp;
}

/** Construct a pool registry to serve a maximum of total_pools */
int
pktpool_attach(osl_t *osh, uint32 total_pools)
{
	uint32 poolid;

	if (pktpools_max != 0U) {
		return BCME_ERROR;
	}

	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);

	/* Initialize registry: reserve slot#0 and tag others as free */
	PKTPOOL_REGISTRY_RSV();			/* reserve slot#0 */

	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* tag all unreserved entries as free */
		PKTPOOL_REGISTRY_CLR(poolid);
	}

	pktpools_max = total_pools;

	return (int)pktpools_max;
}

/** Destruct the pool registry. Ascertain all pools were first de-inited */
int
pktpool_dettach(osl_t *osh)
{
	uint32 poolid;

	if (pktpools_max == 0U) {
		return BCME_OK;
	}

	/* Ascertain that no pools are still registered */
	ASSERT(PKTPOOL_REGISTRY_ISRSVD());	/* assert reserved slot */

	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* ascertain all others are free */
		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
	}

	pktpools_max = 0U; /* restore boot state */

	return BCME_OK;
}

/** Register a pool in a free slot; return the registry slot index */
static int
pktpool_register(pktpool_t * poolptr)
{
	uint32 poolid;

	if (pktpools_max == 0U) {
		return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
	}

	ASSERT(pktpools_max != 0U);

	/* find an empty slot in pktpools_registry */
	PKTPOOL_REGISTRY_FOREACH(poolid) {
		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
			return (int)poolid; /* return pool ID */
		}
	} /* FOREACH */

	return PKTPOOL_INVALID_ID; /* error: registry is full */
}

/** Deregister a pktpool, given the pool pointer; tag slot as free */
static int
pktpool_deregister(pktpool_t * poolptr)
{
	uint32 poolid;

	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));

	poolid = POOLID(poolptr);
	ASSERT(poolid <= pktpools_max);

	/* Ascertain that a previously registered poolptr is being de-registered */
	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
	} else {
		ASSERT(0);
		return BCME_ERROR; /* mismatch in registry */
	}

	return BCME_OK;
}
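/*
 * Illustrative sketch (editor-added, not part of the original driver): the
 * registry must be constructed with pktpool_attach() before any pool is
 * initialized, and torn down with pktpool_dettach() only after every pool
 * has been de-inited. Function name and the compile guard are hypothetical.
 */
#ifdef HND_PKTPOOL_USAGE_EXAMPLE
static int
example_registry_lifecycle(osl_t *osh, int plen, uint8 type)
{
	pktpool_t pool;
	int n = 8;	/* request 8 pre-filled packets of plen bytes each */
	int err;

	/* Construct a registry able to serve up to PKTPOOL_MAXIMUM_ID pools */
	if (pktpool_attach(osh, PKTPOOL_MAXIMUM_ID) != PKTPOOL_MAXIMUM_ID)
		return BCME_ERROR;

	/* Each initialized pool claims one registry slot */
	err = pktpool_init(osh, &pool, &n, plen, FALSE, type);
	if (err == BCME_ERROR)
		return err;

	/* ... use the pool ... */

	pktpool_deinit(osh, &pool);	/* returns the slot to the registry */
	return pktpool_dettach(osh);	/* asserts every slot is free again */
}
#endif /* HND_PKTPOOL_USAGE_EXAMPLE */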
/*
 * pktpool_init:
 * User provides a pktpool_t structure and specifies the number of packets to
 * be pre-filled into the pool (pplen). The size of all packets in a pool must
 * be the same and is specified by plen.
 * pktpool_init first attempts to register the pool and fetch a unique poolid.
 * If registration fails, it is considered a BCME_ERROR, caused either because
 * the registry was not pre-created (pktpool_attach) or because the registry
 * is full.
 * If registration succeeds, then the requested number of packets will be filled
 * into the pool as part of initialization. In the event that there is no
 * available memory to service the request, BCME_NOMEM will be returned
 * along with the count of how many packets were successfully allocated.
 * In dongle builds, prior to memory reclamation, one should limit the number
 * of packets to be allocated during pktpool_init and fill the pool up after
 * the reclaim stage.
 */
int
pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
{
	int i, err = BCME_OK;
	int pktplen;
	uint8 pktp_id;

	ASSERT(pktp != NULL);
	ASSERT(osh != NULL);
	ASSERT(pplen != NULL);

	pktplen = *pplen;

	bzero(pktp, sizeof(pktpool_t));

	/* assign a unique pktpool id */
	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
		return BCME_ERROR;
	}
	POOLSETID(pktp, pktp_id);

	pktp->inited = TRUE;
	pktp->istx = istx ? TRUE : FALSE;
	pktp->plen = (uint16)plen;
	pktp->type = type;

	if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	pktp->maxlen = PKTPOOL_LEN_MAX;
	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);

	for (i = 0; i < pktplen; i++) {
		void *p;
		p = PKTGET(osh, plen, TRUE);

		if (p == NULL) {
			/* Not able to allocate all requested pkts,
			 * so just return what was actually allocated.
			 * We can add to the pool later.
			 */
			if (pktp->freelist == NULL) /* pktpool free list is empty */
				err = BCME_NOMEM;

			goto exit;
		}

		PKTSETPOOL(osh, p, TRUE, pktp);		/* Tag packet with pool ID */

		PKTSETFREELIST(p, pktp->freelist);	/* insert p at head of free list */
		pktp->freelist = p;

		pktp->avail++;

#ifdef BCMDBG_POOL
		pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif
	}

exit:
	pktp->len = pktp->avail;

	*pplen = pktp->len;
	return err;
}

/*
 * pktpool_deinit:
 * Prior to freeing a pktpool, all packets must first be freed into the pktpool.
 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
 * An assert is in place to ensure that there are no packets still lingering
 * around. Packets freed to a pool after the deinit will cause memory
 * corruption, as the pktpool_t structure no longer exists.
 */
int
pktpool_deinit(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
	{
		int i;
		for (i = 0; i <= pktp->len; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif

	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->len);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->len -= freed;

	pktpool_deregister(pktp); /* release previously acquired unique pool id */
	POOLSETID(pktp, PKTPOOL_INVALID_ID);

	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->inited = FALSE;

	/* Are there still pending pkts? */
	ASSERT(pktp->len == 0);

	return 0;
}
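/*
 * Illustrative sketch (editor-added): pktpool_init() may legitimately return
 * BCME_NOMEM, with *pplen reporting the partial fill; a caller can accept the
 * partial pool and top it up later with pktpool_fill() (e.g. after reclaim).
 * The function name and compile guard are hypothetical.
 */
#ifdef HND_PKTPOOL_USAGE_EXAMPLE
static int
example_partial_init(osl_t *osh, pktpool_t *example_pool, int plen, uint8 type)
{
	int n = 32;	/* ask for 32 packets of plen bytes each */
	int err = pktpool_init(osh, example_pool, &n, plen, FALSE, type);

	if (err == BCME_ERROR)
		return err;	/* registry missing or full: pool is unusable */

	if (err == BCME_NOMEM) {
		/* Only 'n' packets were allocated; top up toward maxlen later,
		 * e.g. after the reclaim stage returns memory to the heap.
		 */
		err = pktpool_fill(osh, example_pool, FALSE);
	}
	return err;
}
#endif /* HND_PKTPOOL_USAGE_EXAMPLE */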
int
pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
{
	void *p;
	int err = 0;
	int len, psize, maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(pktp->plen != 0);

	maxlen = pktp->maxlen;
	psize = minimal ? (maxlen >> 2) : maxlen;
	for (len = (int)pktp->len; len < psize; len++) {

		p = PKTGET(osh, pktp->plen, TRUE); /* all pool packets share length plen */

		if (p == NULL) {
			err = BCME_NOMEM;
			break;
		}

		if (pktpool_add(pktp, p) != BCME_OK) {
			PKTFREE(osh, p, FALSE);
			err = BCME_ERROR;
			break;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (pktp->cbcnt) {
		if (pktp->empty == FALSE)
			pktpool_avail_notify(pktp);
	}

	return err;
}

static void *
pktpool_deq(pktpool_t *pktp)
{
	void *p = NULL;

	if (pktp->avail == 0)
		return NULL;

	ASSERT(pktp->freelist != NULL);

	p = pktp->freelist;		 /* dequeue packet from head of pktpool free list */
	pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
	PKTSETFREELIST(p, NULL);

	pktp->avail--;

	return p;
}

static void
pktpool_enq(pktpool_t *pktp, void *p)
{
	ASSERT(p != NULL);

	PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
	pktp->freelist = p;		   /* free list points to newly inserted packet */

	pktp->avail++;
	ASSERT(pktp->avail <= pktp->len);
}

/** utility for registering host addr fill function called from pciedev */
int
/* BCMATTACHFN */
(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{
	ASSERT(cb != NULL);

	ASSERT(pktp->cbext.cb == NULL);
	pktp->cbext.cb = cb;
	pktp->cbext.arg = arg;
	return 0;
}

int
pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{
	ASSERT(cb != NULL);

	if (pktp == NULL)
		return BCME_ERROR;
	ASSERT(pktp->rxcplidfn.cb == NULL);
	pktp->rxcplidfn.cb = cb;
	pktp->rxcplidfn.arg = arg;
	return 0;
}

/* Callback functions for split rx modes */
/** whenever the host posts an rxbuffer, invoke dma_rxfill from the pciedev layer */
void
pktpool_invoke_dmarxfill(pktpool_t *pktp)
{
	ASSERT(pktp->dmarxfill.cb);
	ASSERT(pktp->dmarxfill.arg);

	if (pktp->dmarxfill.cb)
		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
}

int
pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	ASSERT(cb != NULL);

	pktp->dmarxfill.cb = cb;
	pktp->dmarxfill.arg = arg;
	return 0;
}

/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */
int
pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	i = pktp->cbcnt;
	if (i == PKTPOOL_CB_MAX_AVL) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->cbs[i].cb == NULL);
	pktp->cbs[i].cb = cb;
	pktp->cbs[i].arg = arg;
	pktp->cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

int
pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	i = pktp->ecbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->ecbs[i].cb == NULL);
	pktp->ecbs[i].cb = cb;
	pktp->ecbs[i].arg = arg;
	pktp->ecbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

static int
pktpool_empty_notify(pktpool_t *pktp)
{
	int i;

	pktp->empty = TRUE;
	for (i = 0; i < pktp->ecbcnt; i++) {
		ASSERT(pktp->ecbs[i].cb != NULL);
		pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
	}
	pktp->empty = FALSE;

	return 0;
}
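/*
 * Illustrative sketch (editor-added): a client that must keep a DMA ring fed
 * can register an "avail" callback; pktpool_avail_notify() invokes it when
 * freed packets return to the pool. The callback and flag names are
 * hypothetical, as is the compile guard.
 */
#ifdef HND_PKTPOOL_USAGE_EXAMPLE
static void
example_avail_cb(pktpool_t *pool, void *arg)
{
	/* May be invoked while pktpool_free() still holds the pool mutex in
	 * HND_PKTPOOL_THREAD_SAFE builds: keep it short and do not re-enter
	 * pktpool_get()/pktpool_free() here.
	 */
	bool *refill_pending = (bool *)arg;

	(void)pool;
	*refill_pending = TRUE;	/* defer the actual refill to the caller's context */
}

static int
example_register_avail(pktpool_t *pool, bool *refill_pending)
{
	/* Fails with BCME_ERROR once PKTPOOL_CB_MAX_AVL callbacks are in use */
	return pktpool_avail_register(pool, example_avail_cb, refill_pending);
}
#endif /* HND_PKTPOOL_USAGE_EXAMPLE */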
#ifdef BCMDBG_POOL
int
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb);

	i = pktp->dbg_cbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->dbg_cbs[i].cb == NULL);
	pktp->dbg_cbs[i].cb = cb;
	pktp->dbg_cbs[i].arg = arg;
	pktp->dbg_cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

int pktpool_dbg_notify(pktpool_t *pktp);

int
pktpool_dbg_notify(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	for (i = 0; i < pktp->dbg_cbcnt; i++) {
		ASSERT(pktp->dbg_cbs[i].cb);
		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_dbg_dump(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p);
		printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
			pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100,
			PKTPOOLSTATE(pktp->dbg_q[i].p));
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
{
	int i;
	int state;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	bzero(stats, sizeof(pktpool_stats_t));
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		state = PKTPOOLSTATE(pktp->dbg_q[i].p);
		switch (state) {
			case POOL_TXENQ:
				stats->enq++; break;
			case POOL_TXDH:
				stats->txdh++; break;
			case POOL_TXD11:
				stats->txd11++; break;
			case POOL_RXDH:
				stats->rxdh++; break;
			case POOL_RXD11:
				stats->rxd11++; break;
			case POOL_RXFILL:
				stats->rxfill++; break;
			case POOL_IDLE:
				stats->idle++; break;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_start_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			pktp->dbg_q[i].cycles = cycles;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int pktpool_stop_trigger(pktpool_t *pktp, void *p);

int
pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			if (pktp->dbg_q[i].cycles == 0)
				break;

			if (cycles >= pktp->dbg_q[i].cycles)
				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
			else
				pktp->dbg_q[i].dur =
					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;

			pktp->dbg_q[i].cycles = 0;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
#endif /* BCMDBG_POOL */
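/*
 * Illustrative sketch (editor-added): in BCMDBG_POOL builds, per-packet state
 * counters can be snapshotted via pktpool_stats_dump(). The function name and
 * compile guard are hypothetical.
 */
#if defined(BCMDBG_POOL) && defined(HND_PKTPOOL_USAGE_EXAMPLE)
static void
example_dump_stats(pktpool_t *pool)
{
	pktpool_stats_t stats;

	if (pktpool_stats_dump(pool, &stats) == 0) {
		printf("enq=%d idle=%d rxfill=%d\n",
			stats.enq, stats.idle, stats.rxfill);
	}
}
#endif /* BCMDBG_POOL && HND_PKTPOOL_USAGE_EXAMPLE */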
int
pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
{
	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->availcb_excl = NULL;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
{
	int i;
	int err;

	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(pktp->availcb_excl == NULL);
	for (i = 0; i < pktp->cbcnt; i++) {
		if (cb == pktp->cbs[i].cb) {
			pktp->availcb_excl = &pktp->cbs[i];
			break;
		}
	}

	if (pktp->availcb_excl == NULL)
		err = BCME_ERROR;
	else
		err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

static int
pktpool_avail_notify(pktpool_t *pktp)
{
	int i, k, idx;
	int avail;

	ASSERT(pktp);
	if (pktp->availcb_excl != NULL) {
		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
		return 0;
	}

	k = pktp->cbcnt - 1;
	for (i = 0; i < pktp->cbcnt; i++) {
		avail = pktp->avail;

		if (avail) {
			if (pktp->cbtoggle)
				idx = i;
			else
				idx = k--;

			ASSERT(pktp->cbs[idx].cb != NULL);
			pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
		}
	}

	/* Alternate between filling from head or tail */
	pktp->cbtoggle ^= 1;

	return 0;
}

void *
pktpool_get(pktpool_t *pktp)
{
	void *p;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return NULL;

	p = pktpool_deq(pktp);

	if (p == NULL) {
		/* Notify and try to reclaim tx pkts */
		if (pktp->ecbcnt)
			pktpool_empty_notify(pktp);

		p = pktpool_deq(pktp);
		if (p == NULL)
			goto done;
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return NULL;

	return p;
}

void
pktpool_free(pktpool_t *pktp, void *p)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return;

	ASSERT(p != NULL);
#ifdef BCMDBG_POOL
	/* pktpool_stop_trigger(pktp, p); */
#endif

	pktpool_enq(pktp, p);

	/**
	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
	 * If any avail callback functions are registered, send a notification
	 * that a new packet is available in the pool.
	 */
	if (pktp->cbcnt) {
		/* To use cpu cycles more efficiently, callbacks can be temporarily
		 * disabled. This allows feeding on a burst basis as opposed to an
		 * inefficient per-packet basis.
		 */
		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
			/**
			 * If the call originated from pktpool_empty_notify, the just
			 * freed packet is needed in pktpool_get.
			 * Therefore don't call pktpool_avail_notify.
			 */
			if (pktp->empty == FALSE)
				pktpool_avail_notify(pktp);
		} else {
			/**
			 * The callback is temporarily disabled; log that a packet
			 * has been freed.
			 */
			pktp->emptycb_disable = EMPTYCB_SKIPPED;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return;
}
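/*
 * Illustrative sketch (editor-added): a previously registered avail callback
 * can be made the exclusive receiver of availability notifications, and
 * normal fan-out restored later. example_cb stands in for a callback already
 * registered through pktpool_avail_register(); the compile guard is
 * hypothetical.
 */
#ifdef HND_PKTPOOL_USAGE_EXAMPLE
static int
example_exclusive_notify(osl_t *osh, pktpool_t *pool, pktpool_cb_t example_cb)
{
	/* Route all avail notifications to this one callback... */
	if (pktpool_avail_notify_exclusive(osh, pool, example_cb) != 0)
		return BCME_ERROR;	/* example_cb was never registered */

	/* ... and later restore delivery to all registered callbacks */
	return pktpool_avail_notify_normal(osh, pool);
}
#endif /* HND_PKTPOOL_USAGE_EXAMPLE */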
int
pktpool_add(pktpool_t *pktp, void *p)
{
	int err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(p != NULL);

	if (pktp->len == pktp->maxlen) {
		err = BCME_RANGE;
		goto done;
	}

	/* pkts in pool have same length */
	ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
	PKTSETPOOL(OSH_NULL, p, TRUE, pktp);

	pktp->len++;
	pktpool_enq(pktp, p);

#ifdef BCMDBG_POOL
	pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

/* Force pktpool_setmaxlen() into RAM as it uses a constant
 * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
 */
int
BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (maxlen > PKTPOOL_LEN_MAX)
		maxlen = PKTPOOL_LEN_MAX;

	/* if pool is already beyond maxlen, then just cap it,
	 * since we currently do not reduce the pool length
	 * already allocated
	 */
	pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return pktp->maxlen;
}

void
pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
{
	ASSERT(pktp);

	/**
	 * To use cpu cycles more efficiently, callbacks can be temporarily disabled.
	 * If the callback is going to be re-enabled, check whether any packet got
	 * freed and added back to the pool while the callback was disabled.
	 * When that is the case, do the callback now, provided that callback functions
	 * are registered and this call did not originate from pktpool_empty_notify.
	 */
	if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
		(pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
		pktpool_avail_notify(pktp);
	}

	/* Enable or temporarily disable callback when packet becomes available. */
	pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
}

bool
pktpool_emptycb_disabled(pktpool_t *pktp)
{
	ASSERT(pktp);
	return pktp->emptycb_disable != EMPTYCB_ENABLED;
}
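/*
 * Illustrative sketch (editor-added): when returning a burst of packets, the
 * per-packet avail notification can be suppressed and delivered once at the
 * end; pktpool_emptycb_disable(FALSE) fires the deferred notification if any
 * free was skipped. The function name, pkts/npkts parameters and compile
 * guard are hypothetical.
 */
#ifdef HND_PKTPOOL_USAGE_EXAMPLE
static void
example_free_burst(pktpool_t *pool, void **pkts, int npkts)
{
	int i;

	pktpool_emptycb_disable(pool, TRUE);	/* defer avail callbacks */

	for (i = 0; i < npkts; i++)
		pktpool_free(pool, pkts[i]);	/* each free is logged as SKIPPED */

	pktpool_emptycb_disable(pool, FALSE);	/* one notification for the burst */
}
#endif /* HND_PKTPOOL_USAGE_EXAMPLE */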
#ifdef BCMPKTPOOL
#include <hnd_lbuf.h>

pktpool_t *pktpool_shared = NULL;

#ifdef BCMFRAGPOOL
pktpool_t *pktpool_shared_lfrag = NULL;
#endif /* BCMFRAGPOOL */

pktpool_t *pktpool_shared_rxlfrag = NULL;

static osl_t *pktpool_osh = NULL;

void
hnd_pktpool_init(osl_t *osh)
{
	int n;

	/* Construct a packet pool registry before initializing packet pools */
	n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
	if (n != PKTPOOL_MAXIMUM_ID) {
		ASSERT(0);
		return;
	}

	pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared == NULL) {
		ASSERT(0);
		goto error1;
	}

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_lfrag == NULL) {
		ASSERT(0);
		goto error2;
	}
#endif

#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_rxlfrag == NULL) {
		ASSERT(0);
		goto error3;
	}
#endif

	/*
	 * At this early stage, there's not enough memory to allocate all
	 * requested pkts in the shared pool. Need to add to the pool
	 * after reclaim.
	 *
	 * n = NRXBUFPOST + SDPCMD_RXBUFS;
	 *
	 * Initialization of packet pools may fail (BCME_ERROR) if the packet pool
	 * registry is not initialized or the registry is depleted.
	 *
	 * A BCME_NOMEM error only indicates that the requested number of packets
	 * were not filled into the pool.
	 */
	n = 1;
	if (pktpool_init(osh, pktpool_shared, &n, PKTBUFSZ, FALSE, lbuf_basic) ==
		BCME_ERROR) {
		ASSERT(0);
		goto error4;
	}
	pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	n = 1;
	if (pktpool_init(osh, pktpool_shared_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag) ==
		BCME_ERROR) {
		ASSERT(0);
		goto error5;
	}
	pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
#endif

#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	n = 1;
	if (pktpool_init(osh, pktpool_shared_rxlfrag, &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag) ==
		BCME_ERROR) {
		ASSERT(0);
		goto error6;
	}
	pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
#endif

	pktpool_osh = osh;

	return;

#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
error6:
#endif

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	pktpool_deinit(osh, pktpool_shared_lfrag);
error5:
#endif

#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
	(defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
	pktpool_deinit(osh, pktpool_shared);
#endif

error4:
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	hnd_free(pktpool_shared_rxlfrag);
	pktpool_shared_rxlfrag = (pktpool_t *)NULL;
error3:
#endif /* BCMRXFRAGPOOL */

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	hnd_free(pktpool_shared_lfrag);
	pktpool_shared_lfrag = (pktpool_t *)NULL;
error2:
#endif /* BCMFRAGPOOL */

	hnd_free(pktpool_shared);
	pktpool_shared = (pktpool_t *)NULL;

error1:
	pktpool_dettach(osh);
}

void
hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
{
	pktpool_fill(pktpool_osh, pktpool, minimal);
}

/* refill pktpools after reclaim */
void
hnd_pktpool_refill(bool minimal)
{
	if (POOL_ENAB(pktpool_shared)) {
		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
	}
/* fragpool reclaim */
#ifdef BCMFRAGPOOL
	if (POOL_ENAB(pktpool_shared_lfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
	}
#endif /* BCMFRAGPOOL */
/* rx fragpool reclaim */
#ifdef BCMRXFRAGPOOL
	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
	}
#endif
}
#endif /* BCMPKTPOOL */
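/*
 * Illustrative sketch (editor-added): after the firmware reclaim stage has
 * returned init-time memory to the heap, the shared pools can be grown toward
 * their maxlen; passing minimal=TRUE instead would fill each pool only to a
 * quarter of its maxlen. The function name and compile guard are hypothetical.
 */
#if defined(BCMPKTPOOL) && defined(HND_PKTPOOL_USAGE_EXAMPLE)
static void
example_post_reclaim(void)
{
	hnd_pktpool_refill(FALSE);	/* grow all enabled pools toward maxlen */
}
#endif /* BCMPKTPOOL && HND_PKTPOOL_USAGE_EXAMPLE */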