/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2015, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: linux_osl.c 530336 2015-01-29 22:52:35Z $
 */

#define LINUX_PORT

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>


#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <pcicfg.h>



#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#if defined(__ARM_ARCH_7A__)
#include <arch/arm/include/asm/tlbflush.h>
#include <arch/arm/mm/mm.h>
#endif
#include <linux/brcmstb/cma_driver.h>
#endif /* BCM_SECURE_DMA */

#include <linux/fs.h>
#include <linux/mmc/host.h>


#if defined(BCMPCIE)
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
#include <bcmpcie.h>
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
#endif /* BCMPCIE */

#define PCI_CFG_RETRY		10

#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
#define DUMPBUFSZ		1024

#ifdef CONFIG_DHD_USE_STATIC_BUF
#define DHD_SKB_HDRSIZE		336
#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)

#define STATIC_BUF_MAX_NUM	16
#define STATIC_BUF_SIZE		(PAGE_SIZE*2)
#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
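
/*
 * Worked example (assuming 4 KiB pages; illustrative, not normative):
 * DHD_SKB_1PAGE_BUFSIZE is 4096 - 336 = 3760 bytes and
 * DHD_SKB_2PAGE_BUFSIZE is 8192 - 336 = 7856 bytes, so the static pool
 * below spans 16 buffers * 8 KiB = 128 KiB in total.
 */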

typedef struct bcm_static_buf {
	struct semaphore static_sem;
	unsigned char *buf_ptr;
	unsigned char buf_use[STATIC_BUF_MAX_NUM];
} bcm_static_buf_t;

static bcm_static_buf_t *bcm_static_buf = 0;

#define STATIC_PKT_MAX_NUM	8
#if defined(ENHANCED_STATIC_BUF)
#define STATIC_PKT_4PAGE_NUM	1
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
#else
#define STATIC_PKT_4PAGE_NUM	0
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
#endif /* ENHANCED_STATIC_BUF */

typedef struct bcm_static_pkt {
	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
#ifdef ENHANCED_STATIC_BUF
	struct sk_buff *skb_16k;
#endif
	struct semaphore osl_pkt_sem;
	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM];
} bcm_static_pkt_t;

static bcm_static_pkt_t *bcm_static_skb = 0;

#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
#define STATIC_BUF_FLOWRING_SIZE	((PAGE_SIZE)*(7))
#define STATIC_BUF_FLOWRING_NUM		42
#define RINGID_TO_FLOWID(idx)	((idx) + (BCMPCIE_H2D_COMMON_MSGRINGS) \
	- (BCMPCIE_H2D_TXFLOWRINGID))
typedef struct bcm_static_flowring_buf {
	spinlock_t flowring_lock;
	void *buf_ptr[STATIC_BUF_FLOWRING_NUM];
	unsigned char buf_use[STATIC_BUF_FLOWRING_NUM];
} bcm_static_flowring_buf_t;

bcm_static_flowring_buf_t *bcm_static_flowring = 0;
#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */

void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */

typedef struct bcm_mem_link {
	struct bcm_mem_link *prev;
	struct bcm_mem_link *next;
	uint size;
	int line;
	void *osh;
	char file[BCM_MEM_FILENAME_LEN];
} bcm_mem_link_t;

struct osl_cmn_info {
	atomic_t malloced;
	atomic_t pktalloced;	/* Number of allocated packet buffers */
	spinlock_t dbgmem_lock;
	bcm_mem_link_t *dbgmem_list;
	spinlock_t pktalloc_lock;
	atomic_t refcount;	/* Number of references to this shared structure. */
};
typedef struct osl_cmn_info osl_cmn_t;

struct osl_info {
	osl_pubinfo_t pub;
#ifdef CTFPOOL
	ctfpool_t *ctfpool;
#endif /* CTFPOOL */
	uint magic;
	void *pdev;
	uint failed;
	uint bustype;
	osl_cmn_t *cmn;	/* Common OSL related data shared between two OSH's */

	void *bus_handle;
#ifdef BCMDBG_CTRACE
	spinlock_t ctrace_lock;
	struct list_head ctrace_list;
	int ctrace_num;
#endif /* BCMDBG_CTRACE */
	uint32 flags;	/* If specific cases to be handled in the OSL */
#ifdef BCM_SECURE_DMA
	struct cma_dev *cma;
	struct sec_mem_elem *sec_list_512;
	struct sec_mem_elem *sec_list_base_512;
	struct sec_mem_elem *sec_list_2048;
	struct sec_mem_elem *sec_list_base_2048;
	struct sec_mem_elem *sec_list_4096;
	struct sec_mem_elem *sec_list_base_4096;
	phys_addr_t contig_base;
	void *contig_base_va;
	phys_addr_t contig_base_alloc;
	void *contig_base_alloc_va;
	phys_addr_t contig_base_alloc_coherent;
	void *contig_base_alloc_coherent_va;
	phys_addr_t contig_delta_va_pa;
	struct {
		phys_addr_t pa;
		void *va;
		bool avail;
	} sec_cma_coherent[SEC_CMA_COHERENT_MAX];

#endif /* BCM_SECURE_DMA */

};
#ifdef BCM_SECURE_DMA
phys_addr_t g_contig_delta_va_pa;
static void osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn);
static int osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn);
static void osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn);
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
	bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static void osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
	ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */

#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
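
/*
 * Note: the eight 32-bit stores above zero the 32-byte OSL packet tag kept
 * in skb->cb[]. An equivalent form would be memset(s->cb, 0, 32); the
 * unrolled stores simply keep the clear inline on the packet fast path.
 */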

/* PCMCIA attribute space access macros */

/* Global ASSERT type flag */
uint32 g_assert_type = 0;
module_param(g_assert_type, int, 0);

static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */

/* When a new error code is added to bcmutils.h, add the OS-
 * specific error translation here as well
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -52
#error "You need to add an OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif
};
uint lmtest = FALSE;

/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;

	/* Array bounds covered by ASSERT in osl_attach */
	return linuxbcmerrormap[-bcmerror];
}
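
/*
 * Usage sketch (illustrative, not driver code): negative BCME_* codes index
 * the map above after negation, so
 *
 *	int err = osl_error(BCME_NOMEM);	// yields -ENOMEM
 *	int ok  = osl_error(BCME_OK);		// yields 0
 *
 * Positive "errors" are treated as success and collapse to 0.
 */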
#ifdef SHARED_OSL_CMN
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
{
#else
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	void **osl_cmn = NULL;
#endif /* SHARED_OSL_CMN */
	osl_t *osh;
	gfp_t flags;

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))
		return osh;

	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
			kfree(osh);
			return NULL;
		}
		bzero(osh->cmn, sizeof(osl_cmn_t));
		if (osl_cmn)
			*osl_cmn = osh->cmn;
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));

	} else {
		osh->cmn = *osl_cmn;
	}
	atomic_add(1, &osh->cmn->refcount);

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->failed = 0;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA

	osl_sec_dma_setup_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION);

	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	osh->contig_base_va = osh->contig_base_alloc_va;

	/*
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
	 * osh->sec_list_base_512 = osh->sec_list_512;
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
	 * osh->sec_list_base_2048 = osh->sec_list_2048;
	 */
	osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096);
	osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

#ifdef BCMDBG_CTRACE
	spin_lock_init(&osh->ctrace_lock);
	INIT_LIST_HEAD(&osh->ctrace_list);
	osh->ctrace_num = 0;
#endif /* BCMDBG_CTRACE */


	return osh;
}
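
/*
 * Lifecycle sketch (illustrative; the bus layer is the real caller): one
 * handle is created per device and must be balanced with osl_detach(), e.g.
 *
 *	osl_t *osh = osl_attach(pdev, PCI_BUS, TRUE);
 *	if (osh == NULL)
 *		return -ENOMEM;
 *	...
 *	osl_detach(osh);
 *
 * With SHARED_OSL_CMN defined, a second attach can pass the same osl_cmn
 * pointer so both handles share the allocation/packet counters.
 */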

int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("can not alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}
		else
			printk("alloc static buf at %p!\n", bcm_static_buf);


		sema_init(&bcm_static_buf->static_sem, 1);

		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
	}

#ifdef BCMSDIO
	if (!bcm_static_skb && adapter) {
		int i;
		void *skb_buff_ptr = 0;
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
		if (!skb_buff_ptr) {
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
			bcm_static_skb->pkt_use[i] = 0;

		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
	}
#endif /* BCMSDIO */
#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
	if (!bcm_static_flowring && adapter) {
		int i;
		void *flowring_ptr = 0;
		bcm_static_flowring =
			(bcm_static_flowring_buf_t *)((char *)bcm_static_buf + 4096);
		flowring_ptr = wifi_platform_prealloc(adapter, 10, 0);
		if (!flowring_ptr) {
			printk("%s: flowring_ptr is NULL\n", __FUNCTION__);
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			bcm_static_flowring = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		bcopy(flowring_ptr, bcm_static_flowring->buf_ptr,
			sizeof(void *) * STATIC_BUF_FLOWRING_NUM);
		for (i = 0; i < STATIC_BUF_FLOWRING_NUM; i++) {
			bcm_static_flowring->buf_use[i] = 0;
		}

		spin_lock_init(&bcm_static_flowring->flowring_lock);
	}
#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	return 0;
}

void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}

void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}

void
osl_detach(osl_t *osh)
{
	if (osh == NULL)
		return;
#ifdef BCM_SECURE_DMA
	osl_sec_dma_free_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_MEMBLOCK);
#endif /* BCM_SECURE_DMA */

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	atomic_sub(1, &osh->cmn->refcount);
	if (atomic_read(&osh->cmn->refcount) == 0) {
		kfree(osh->cmn);
	}
	kfree(osh);
}

int osl_static_mem_deinit(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
#ifdef BCMSDIO
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif /* BCMSDIO */
#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
	if (bcm_static_flowring) {
		bcm_static_flowring = 0;
	}
#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	return 0;
}

static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
	flags |= GFP_DMA;
#endif
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
	return skb;
}

#ifdef CTFPOOL

#ifdef CTFPOOL_SPINLOCK
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
#else
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
#endif /* CTFPOOL_SPINLOCK */
/*
 * Allocate and add an object to packet pool.
 */
void *
osl_ctfpool_add(osl_t *osh)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Allocate a new skb and add it to the ctfpool */
	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
	if (skb == NULL) {
		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
			osh->ctfpool->obj_size);
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Add to ctfpool */
	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	return skb;
}

/*
 * Add new objects to the pool.
 */
void
osl_ctfpool_replenish(osl_t *osh, uint thresh)
{
	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

	/* Do nothing if no refills are required */
	while ((osh->ctfpool->refills > 0) && (thresh--)) {
		osl_ctfpool_add(osh);
		osh->ctfpool->refills--;
	}
}

/*
 * Initialize the packet pool with specified number of objects.
 */
int32
osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
{
	gfp_t flags;

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
	ASSERT(osh->ctfpool);

	osh->ctfpool->max_obj = numobj;
	osh->ctfpool->obj_size = size;

	spin_lock_init(&osh->ctfpool->lock);

	while (numobj--) {
		if (!osl_ctfpool_add(osh))
			return -1;
		osh->ctfpool->fast_frees--;
	}

	return 0;
}
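
/*
 * Pool lifecycle sketch (illustrative): a caller typically sizes the pool
 * once at bring-up, tops it up from a soft context, and tears it down last:
 *
 *	osl_ctfpool_init(osh, 512, RXBUFSZ);	// prefill 512 skbs
 *	...
 *	osl_ctfpool_replenish(osh, 16);		// refill up to 16 objects
 *	...
 *	osl_ctfpool_cleanup(osh);
 *
 * RXBUFSZ is a placeholder for the driver's receive buffer size.
 */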

/*
 * Cleanup the packet pool objects.
 */
void
osl_ctfpool_cleanup(osl_t *osh)
{
	struct sk_buff *skb, *nskb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

	CTFPOOL_LOCK(osh->ctfpool, flags);

	skb = osh->ctfpool->head;

	while (skb != NULL) {
		nskb = skb->next;
		dev_kfree_skb(skb);
		skb = nskb;
		osh->ctfpool->curr_obj--;
	}

	ASSERT(osh->ctfpool->curr_obj == 0);
	osh->ctfpool->head = NULL;
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	kfree(osh->ctfpool);
	osh->ctfpool = NULL;
}

void
osl_ctfpool_stats(osl_t *osh, void *b)
{
	struct bcmstrbuf *bb;

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
#ifdef BCMSDIO
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif /* BCMSDIO */
#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
	if (bcm_static_flowring) {
		bcm_static_flowring = 0;
	}
#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	bb = b;

	ASSERT((osh != NULL) && (bb != NULL));

	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
		osh->ctfpool->max_obj, osh->ctfpool->obj_size,
		osh->ctfpool->curr_obj, osh->ctfpool->refills);
	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
		osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
		osh->ctfpool->slow_allocs);
}

static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	if (len > osh->ctfpool->obj_size) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	ASSERT(len <= osh->ctfpool->obj_size);

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Init skb struct */
	skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
	skb->data = skb->head + NET_SKB_PAD;
	skb->tail = skb->head + NET_SKB_PAD;
#else
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;
#endif /* __ARM_ARCH_7A__ */
	skb->len = 0;
	skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	skb->list = NULL;
#endif
	atomic_set(&skb->users, 1);

	PKTSETCLINK(skb, NULL);
	PKTCCLRATTR(skb);
	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);

	return skb;
}
#endif /* CTFPOOL */

#if defined(BCM_GMAC3)
/* Account for a packet delivered to downstream forwarder.
 * Decrement a GMAC forwarder interface's pktalloced count.
 */
void BCMFASTPATH
osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt)
{

	atomic_sub(skb_cnt, &osh->cmn->pktalloced);
}

/* Account for a downstream forwarder delivered packet to a WL/DHD driver.
 * Increment a GMAC forwarder interface's pktalloced count.
 */
#ifdef BCMDBG_CTRACE
void BCMFASTPATH
osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file)
#else
void BCMFASTPATH
osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
#endif /* BCMDBG_CTRACE */
{
#if defined(BCMDBG_CTRACE)
	int i;
	struct sk_buff *skb;
#endif

#if defined(BCMDBG_CTRACE)
	if (skb_cnt > 1) {
		struct sk_buff **skb_array = (struct sk_buff **)skbs;
		for (i = 0; i < skb_cnt; i++) {
			skb = skb_array[i];
#if defined(BCMDBG_CTRACE)
			ASSERT(!PKTISCHAINED(skb));
			ADD_CTRACE(osh, skb, file, line);
#endif /* BCMDBG_CTRACE */
		}
	} else {
		skb = (struct sk_buff *)skbs;
#if defined(BCMDBG_CTRACE)
		ASSERT(!PKTISCHAINED(skb));
		ADD_CTRACE(osh, skb, file, line);
#endif /* BCMDBG_CTRACE */
	}
#endif

	atomic_add(skb_cnt, &osh->cmn->pktalloced);
}

#endif /* BCM_GMAC3 */

/* Convert a driver packet to native(OS) packet
 * In the process, packettag is zeroed out before sending up
 * IP code depends on skb->cb to be setup correctly with various options
 * In our case, that means it should be 0
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
	struct sk_buff *nskb;
#ifdef BCMDBG_CTRACE
	struct sk_buff *nskb1, *nskb2;
#endif

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Decrement the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);

#ifdef BCMDBG_CTRACE
		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
			if (PKTISCHAINED(nskb1)) {
				nskb2 = PKTCLINK(nskb1);
			}
			else
				nskb2 = NULL;

			DEL_CTRACE(osh, nskb1);
		}
#endif /* BCMDBG_CTRACE */
	}
	return (struct sk_buff *)pkt;
}

/* Convert a native(OS) packet to driver packet.
 * In the process, native packet is destroyed, there is no copying
 * Also, a packettag is zeroed out
 */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
#else
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
#endif /* BCMDBG_CTRACE */
{
	struct sk_buff *nskb;
#ifdef BCMDBG_CTRACE
	struct sk_buff *nskb1, *nskb2;
#endif

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Increment the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);

#ifdef BCMDBG_CTRACE
		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
			if (PKTISCHAINED(nskb1)) {
				nskb2 = PKTCLINK(nskb1);
			}
			else
				nskb2 = NULL;

			ADD_CTRACE(osh, nskb1, file, line);
		}
#endif /* BCMDBG_CTRACE */
	}
	return (void *)pkt;
}

/* Return a new packet. zero out pkttag */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
#endif /* BCMDBG_CTRACE */
{
	struct sk_buff *skb;
	uchar num = 0;
	if (lmtest != FALSE) {
		get_random_bytes(&num, sizeof(uchar));
		if ((num + 1) <= (256 * lmtest / 100))
			return NULL;
	}

#ifdef CTFPOOL
	/* Allocate from local pool */
	skb = osl_pktfastget(osh, len);
	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
#else /* CTFPOOL */
	if ((skb = osl_alloc_skb(osh, len))) {
#endif /* CTFPOOL */
		skb->tail += len;
		skb->len += len;
		skb->priority = 0;

#ifdef BCMDBG_CTRACE
		ADD_CTRACE(osh, skb, file, line);
#endif
		atomic_inc(&osh->cmn->pktalloced);
	}

	return ((void*) skb);
}
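
/*
 * Allocation sketch (illustrative): osl_pktget() pairs with osl_pktfree(),
 * and lmtest randomly fails that percentage of allocations for low-memory
 * testing:
 *
 *	void *p = PKTGET(osh, 2048, FALSE);	// macro wrapping osl_pktget
 *	if (p != NULL)
 *		PKTFREE(osh, p, FALSE);		// macro wrapping osl_pktfree
 */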

#ifdef CTFPOOL
static inline void
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
{
	ctfpool_t *ctfpool;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv64 = 0;
#else
	skb->stamp.tv_sec = 0;
#endif

	/* We only need to init the fields that we change */
	skb->dev = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	skb->dst = NULL;
#endif
	OSL_PKTTAG_CLEAR(skb);
	skb->ip_summed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	skb_orphan(skb);
#else
	skb->destructor = NULL;
#endif

	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);

	/* Add object to the ctfpool */
	CTFPOOL_LOCK(ctfpool, flags);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;
	ctfpool->curr_obj++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	CTFPOOL_UNLOCK(ctfpool, flags);
}
#endif /* CTFPOOL */

/* Free the driver packet. Free the tag if present */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCMDBG_CTRACE
		DEL_CTRACE(osh, skb);
#endif


#ifdef CTFPOOL
		if (PKTISFAST(osh, skb)) {
			if (atomic_read(&skb->users) == 1)
				smp_rmb();
			else if (!atomic_dec_and_test(&skb->users))
				goto next_skb;
			osl_pktfastfree(osh, skb);
		} else
#endif
		{
			dev_kfree_skb_any(skb);
		}
#ifdef CTFPOOL
next_skb:
#endif
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (!bcm_static_skb)
		return osl_pktget(osh, len);

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;

			skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM]
				== 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
			skb = bcm_static_skb->skb_8k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;

		skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb_set_tail_pointer(skb, len);
#else
		skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
		skb->len = len;

		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif /* ENHANCED_STATIC_BUF */

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}

void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
	if (!bcm_static_skb) {
		osl_pktfree(osh, p, send);
		return;
	}

	down(&bcm_static_skb->osl_pkt_sem);
	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif
	up(&bcm_static_skb->osl_pkt_sem);
	osl_pktfree(osh, p, send);
}

#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
void*
osl_dma_alloc_consistent_static(osl_t *osh, uint size, uint16 align_bits,
	uint *alloced, dmaaddr_t *pap, uint16 idx)
{
	void *va = NULL;
	uint16 align = (1 << align_bits);
	uint16 flow_id = RINGID_TO_FLOWID(idx);
	unsigned long flags;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;

	if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) {
		printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id);
		return osl_dma_alloc_consistent(osh, size, align_bits,
			alloced, pap);
	}

	if (!bcm_static_flowring) {
		printk("%s: bcm_static_flowring is not initialized\n",
			__FUNCTION__);
		return osl_dma_alloc_consistent(osh, size, align_bits,
			alloced, pap);
	}

	if (size > STATIC_BUF_FLOWRING_SIZE) {
		printk("%s: attempt to allocate huge packet, size=%d\n",
			__FUNCTION__, size);
		return osl_dma_alloc_consistent(osh, size, align_bits,
			alloced, pap);
	}

	*alloced = size;

	spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags);
	if (bcm_static_flowring->buf_use[flow_id]) {
		printk("%s: flowring %d is already alloced\n",
			__FUNCTION__, flow_id);
		spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
		return NULL;
	}

	va = bcm_static_flowring->buf_ptr[flow_id];
	if (va) {
		*pap = (ulong)__virt_to_phys((ulong)va);
		bcm_static_flowring->buf_use[flow_id] = 1;
	}
	spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);

	return va;
}

void
osl_dma_free_consistent_static(osl_t *osh, void *va, uint size,
	dmaaddr_t pa, uint16 idx)
{
	uint16 flow_id = RINGID_TO_FLOWID(idx);
	unsigned long flags;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) {
		printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id);
		return osl_dma_free_consistent(osh, va, size, pa);
	}

	if (!bcm_static_flowring) {
		printk("%s: bcm_static_flowring is not initialized\n",
			__FUNCTION__);
		return osl_dma_free_consistent(osh, va, size, pa);
	}

	spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags);
	if (bcm_static_flowring->buf_use[flow_id]) {
		bcm_static_flowring->buf_use[flow_id] = 0;
	} else {
		printk("%s: flowring %d is already freed\n",
			__FUNCTION__, flow_id);
	}
	spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
}
#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
	uint val = 0;
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	ASSERT(size == 4);

	do {
		pci_read_config_dword(osh->pdev, offset, &val);
		if (val != 0xffffffff)
			break;
	} while (retry--);


	return (val);
}
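
/*
 * Note on the retry loop above: an all-ones read is how PCI config space
 * reports a master abort (e.g. the device momentarily off the bus), so the
 * read is retried up to PCI_CFG_RETRY times before the value is accepted.
 * The same bound is used below when verifying PCI_BAR0_WIN writes.
 */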

void
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	ASSERT(size == 4);

	do {
		pci_write_config_dword(osh->pdev, offset, val);
		if (offset != PCI_BAR0_WIN)
			break;
		if (osl_pci_read_config(osh, offset, size) == val)
			break;
	} while (retry--);

}

/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif
}

/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif
}

/* return domain # for the pci device pointed by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}

/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}

/* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}

static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}

void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}

void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}

void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		int i = 0;
		if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
		{
			down(&bcm_static_buf->static_sem);

			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				up(&bcm_static_buf->static_sem);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			up(&bcm_static_buf->static_sem);

			bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i, size);
			if (osh)
				atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}

void *
osl_mallocz(osl_t *osh, uint size)
{
	void *ptr;

	ptr = osl_malloc(osh, size);

	if (ptr != NULL) {
		bzero(ptr, size);
	}

	return ptr;
}

void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			down(&bcm_static_buf->static_sem);
			bcm_static_buf->buf_use[buf_idx] = 0;
			up(&bcm_static_buf->static_sem);

			if (osh && osh->cmn) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->cmn->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	kfree(addr);
}

uint
osl_check_memleak(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->malloced));
	else
		return 0;
}

uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));
}

uint
osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}


uint
osl_dma_consistent_align(void)
{
	return (PAGE_SIZE);
}

void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifndef BCM_SECURE_DMA
	{
		dma_addr_t pap_lin;
		struct pci_dev *hwdev = osh->pdev;
#ifdef PCIE_TX_DEFERRAL
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_KERNEL);
#else
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_ATOMIC);
#endif
#ifdef BCMDMA64OSL
		PHYSADDRPTRLOSET(pap, (uint32)(pap_lin));
		PHYSADDRPTRHISET(pap, (uint32)(pap_lin >> 32));
#else
		*pap = (dmaaddr_t)pap_lin;
#endif
	}
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
	return va;
}
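
/*
 * Usage sketch (illustrative): a ring allocation and its teardown must use
 * the same size/address pair, since the size is padded for alignment above:
 *
 *	uint alloced;
 *	dmaaddr_t pa;
 *	void *va = osl_dma_alloc_consistent(osh, ringsz, 12, &alloced, &pa);
 *	if (va != NULL)
 *		osl_dma_free_consistent(osh, va, alloced, pa);
 *
 * align_bits of 12 requests 4 KiB alignment; ringsz is a placeholder.
 */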

void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifndef BCM_SECURE_DMA
#ifdef BCMDMA64OSL
	dma_addr_t physaddr = ((dma_addr_t)PHYSADDRHI(pa) << 32) |
		((dma_addr_t)PHYSADDRLO(pa));
#endif /* BCMDMA64OSL */

	struct pci_dev *hwdev = osh->pdev;
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifdef BCMDMA64OSL
	dma_free_coherent(&hwdev->dev, size, va, physaddr);
#else
	dma_free_coherent(&hwdev->dev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */

#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}

dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;
	dma_addr_t physaddr;
#ifdef BCMDMA64OSL
	dmaaddr_t dmaaddr;
#endif

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
	if (dmah != NULL) {
		int32 nsegs, i, totsegs = 0, totlen = 0;
		struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
		struct sk_buff *skb;
		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			sg = &_sg[totsegs];
			if (skb_is_nonlinear(skb)) {
				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
				ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
				pci_map_sg(osh->pdev, sg, nsegs, dir);
			} else {
				nsegs = 1;
				ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
				sg->page_link = 0;
				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
				pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
			}
			totsegs += nsegs;
			totlen += PKTLEN(osh, skb);
		}
		dmah->nsegs = totsegs;
		dmah->origsize = totlen;
		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
			dmah->segs[i].addr = sg_phys(sg);
			dmah->segs[i].length = sg->length;
		}
		return dmah->segs[0].addr;
	}
#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */

	physaddr = pci_map_single(osh->pdev, va, size, dir);
#ifdef BCMDMA64OSL
	PHYSADDRLOSET(dmaaddr, (uint32) physaddr);
	PHYSADDRHISET(dmaaddr, (uint32) (physaddr >> 32));
	return dmaaddr;
#else
	return physaddr;
#endif
}

void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dmaaddr_t physaddr, uint size, int direction)
{
	int dir;
	dma_addr_t pa;
#ifdef BCMDMA64OSL
	pa = ((dma_addr_t) PHYSADDRHI(physaddr)) << 32 |
		(dma_addr_t)PHYSADDRLO(physaddr);
#else
	pa = (dma_addr_t) physaddr;
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
	pci_unmap_single(osh->pdev, pa, size, dir);
}


#if defined(BCMASSERT_LOG)
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
	printk("%s", tempbuf);
#endif /* BCMASSERT_LOG */


}
#endif

void
osl_delay(uint usec)
{
	uint d;

	while (usec > 0) {
		d = MIN(usec, 1000);
		udelay(d);
		usec -= d;
	}
}

void
osl_sleep(uint ms)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	if (ms < 20)
		usleep_range(ms*1000, ms*1000 + 1000);
	else
#endif
	msleep(ms);
}
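
/*
 * Design note: osl_delay() busy-waits in slices of at most 1000 us because
 * very long udelay() arguments can overflow on some platforms, while
 * osl_sleep() prefers usleep_range() below 20 ms since msleep() of short
 * intervals can oversleep by a jiffy or more.
 */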


/* Clone a packet.
 * The pkttag contents are NOT cloned.
 */
#ifdef BCMDBG_CTRACE
void *
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else
void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCMDBG_CTRACE */
{
	void * p;

	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;

		/* if the buffer allocated from ctfpool is cloned then
		 * we can't be sure when it will be freed. since there
		 * is a chance that we will be losing a buffer
		 * from our pool, we increment the refill count for the
		 * object to be alloced later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif /* CTFPOOL */

	/* Clear PKTC context */
	PKTSETCLINK(p, NULL);
	PKTCCLRFLAGS(p);
	PKTCSETCNT(p, 1);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->cmn->pktalloced);
#ifdef BCMDBG_CTRACE
	ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
#endif
	return (p);
}

#ifdef BCMDBG_CTRACE
int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
{
	unsigned long flags;
	struct sk_buff *skb;
	int ck = FALSE;

	spin_lock_irqsave(&osh->ctrace_lock, flags);

	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
		if (pkt == skb) {
			ck = TRUE;
			break;
		}
	}

	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
	return ck;
}

void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
{
	unsigned long flags;
	struct sk_buff *skb;
	int idx = 0;
	int i, j;

	spin_lock_irqsave(&osh->ctrace_lock, flags);

	if (b != NULL)
		bcm_bprintf(b, " Total %d skb not free\n", osh->ctrace_num);
	else
		printk(" Total %d skb not free\n", osh->ctrace_num);

	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
		if (b != NULL)
			bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
		else
			printk("[%d] skb %p:\n", ++idx, skb);

		for (i = 0; i < skb->ctrace_count; i++) {
			j = (skb->ctrace_start + i) % CTRACE_NUM;
			if (b != NULL)
				bcm_bprintf(b, " [%s(%d)]\n", skb->func[j], skb->line[j]);
			else
				printk(" [%s(%d)]\n", skb->func[j], skb->line[j]);
		}
		if (b != NULL)
			bcm_bprintf(b, "\n");
		else
			printk("\n");
	}

	spin_unlock_irqrestore(&osh->ctrace_lock, flags);

	return;
}
#endif /* BCMDBG_CTRACE */


/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
 */

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint
osl_pktalloced(osl_t *osh)
{
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->pktalloced));
	else
		return 0;
}

uint32
osl_rand(void)
{
	uint32 rand;

	get_random_bytes(&rand, sizeof(rand));

	return rand;
}

/* Linux Kernel: File Operations: start */
void *
osl_os_open_image(char *filename)
{
	struct file *fp;

	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * Alternative:
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 * ???
	 */
	if (IS_ERR(fp))
		fp = NULL;

	return fp;
}

int
osl_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;

	if (!image)
		return 0;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 0))
	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
	rdlen = kernel_read(fp, fp->f_pos, buf, len);
	if (rdlen > 0)
		fp->f_pos += rdlen;
#endif

	return rdlen;
}

void
osl_os_close_image(void *image)
{
	if (image)
		filp_close((struct file *)image, NULL);
}

int
osl_os_image_size(void *image)
{
	int len = 0, curroffset;

	if (image) {
		/* store the current offset */
		curroffset = generic_file_llseek(image, 0, 1);
		/* goto end of file to get length */
		len = generic_file_llseek(image, 0, 2);
		/* restore back the offset */
		generic_file_llseek(image, curroffset, 0);
	}
	return len;
}
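
/*
 * Usage sketch (illustrative): these wrappers let firmware/NVRAM download
 * code stream a file in chunks without touching VFS details directly:
 *
 *	void *img = osl_os_open_image("/lib/firmware/fw_bcmdhd.bin");
 *	char chunk[2048];
 *	int n;
 *	while (img && (n = osl_os_get_image_block(chunk, sizeof(chunk), img)) > 0)
 *		;	// feed n bytes of chunk[] to the dongle
 *	osl_os_close_image(img);
 *
 * The firmware path shown is a placeholder, not a fixed location.
 */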

/* Linux Kernel: File Operations: end */


/* APIs to set/get specific quirks in OSL layer */
void
osl_flag_set(osl_t *osh, uint32 mask)
{
	osh->flags |= mask;
}

bool
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	return (osh->flags & mask);
}
#ifdef BCM_SECURE_DMA

static void
osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn)
{
	int ret;

#if defined(__ARM_ARCH_7A__)
	if (regn == CONT_ARMREGION) {
		ret = osl_sec_dma_alloc_contig_mem(osh, memsize, regn);
		if (ret != BCME_OK)
			printk("linux_osl.c: CMA memory access failed\n");
	}
#endif
	/* implement the MIPS Here */
}

static int
osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn)
{
	u64 addr;

	printk("linux_osl.c: The value of cma mem block size = %ld\n", memsize);
	osh->cma = cma_dev_get_cma_dev(regn);
	printk("The value of cma = %p\n", osh->cma);
	if (!osh->cma) {
		printk("linux_osl.c: contig_region index is invalid\n");
		return BCME_ERROR;
	}
	if (cma_dev_get_mem(osh->cma, &addr, (u32)memsize, SEC_DMA_ALIGN) < 0) {
		printk("linux_osl.c: contiguous memory block allocation failure\n");
		return BCME_ERROR;
	}
	osh->contig_base_alloc = (phys_addr_t)addr;
	osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
	printk("contig base alloc=%lx \n", (ulong)osh->contig_base_alloc);

	return BCME_OK;
}

static void
osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn)
{
	int ret;

	ret = cma_dev_put_mem(osh->cma, (u64)osh->contig_base, memsize);
	if (ret)
		printf("%s contig base free failed\n", __FUNCTION__);
}

static void *
osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
{

	struct page **map;
	int order, i;
	void *addr = NULL;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);

	if (map == NULL)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;

	if (iscache) {
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
		if (isdecr) {
			osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
			g_contig_delta_va_pa = osh->contig_delta_va_pa;
		}
	}
	else {

#if defined(__ARM_ARCH_7A__)
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			pgprot_noncached(__pgprot(PAGE_KERNEL)));
#endif
		if (isdecr) {
			osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page));
			g_contig_delta_va_pa = osh->contig_delta_va_pa;
		}
	}

	kfree(map);
	return (void *)addr;
}

static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	vunmap(contig_base_va);
}

static void
osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
	if (sec_list_base)
		kfree(sec_list_base);
}

static void
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
	int i;
	sec_mem_elem_t *sec_mem_elem;

	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {

		*list = sec_mem_elem;
		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
		for (i = 0; i < max-1; i++) {
			sec_mem_elem->next = (sec_mem_elem + 1);
			sec_mem_elem->size = mbsize;
			sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc;
			sec_mem_elem->vac = osh->contig_base_alloc_va;

			osh->contig_base_alloc += mbsize;
			osh->contig_base_alloc_va += mbsize;

			sec_mem_elem = sec_mem_elem + 1;
		}
		sec_mem_elem->next = NULL;
		sec_mem_elem->size = mbsize;
		sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc;
		sec_mem_elem->vac = osh->contig_base_alloc_va;

		osh->contig_base_alloc += mbsize;
		osh->contig_base_alloc_va += mbsize;

	}
	else
		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
}
|
|
|
|
|
|
static sec_mem_elem_t * BCMFASTPATH
|
|
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
|
|
struct sec_cma_info *ptr_cma_info, uint offset)
|
|
{
|
|
sec_mem_elem_t *sec_mem_elem = NULL;
|
|
|
|
if (size <= 512 && osh->sec_list_512) {
|
|
sec_mem_elem = osh->sec_list_512;
|
|
osh->sec_list_512 = sec_mem_elem->next;
|
|
}
|
|
else if (size <= 2048 && osh->sec_list_2048) {
|
|
sec_mem_elem = osh->sec_list_2048;
|
|
osh->sec_list_2048 = sec_mem_elem->next;
|
|
}
|
|
else if (osh->sec_list_4096) {
|
|
sec_mem_elem = osh->sec_list_4096;
|
|
osh->sec_list_4096 = sec_mem_elem->next;
|
|
} else {
|
|
printf("%s No matching Pool available size=%d \n", __FUNCTION__, size);
|
|
return NULL;
|
|
}
|
|
|
|
if (sec_mem_elem != NULL) {
|
|
sec_mem_elem->next = NULL;
|
|
|
|
if (ptr_cma_info->sec_alloc_list_tail) {
|
|
ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
|
|
}
|
|
|
|
ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
|
|
if (ptr_cma_info->sec_alloc_list == NULL)
|
|
ptr_cma_info->sec_alloc_list = sec_mem_elem;
|
|
}
|
|
return sec_mem_elem;
|
|
}

static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
	sec_mem_elem->dma_handle = 0x0;
	sec_mem_elem->va = NULL;

	/* Push the element back onto the free list matching its pool size. */
	if (sec_mem_elem->size == 512) {
		sec_mem_elem->next = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem;
	}
	else if (sec_mem_elem->size == 2048) {
		sec_mem_elem->next = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem;
	}
	else if (sec_mem_elem->size == 4096) {
		sec_mem_elem->next = osh->sec_list_4096;
		osh->sec_list_4096 = sec_mem_elem;
	}
	else
		printf("%s free failed, size=%d\n", __FUNCTION__, sec_mem_elem->size);
}
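
/*
 * Pool pairing sketch (illustrative only): an element borrowed for a
 * 1500-byte TX buffer comes from, and returns to, the 2048-byte list.
 * 'ci' is a hypothetical struct sec_cma_info * owned by the caller:
 *
 *	sec_mem_elem_t *e;
 *
 *	e = osl_sec_dma_alloc_mem_elem(osh, va, 1500, DMA_TX, ci, 0);
 *	... e->vac is the bounce-buffer VA, e->pa_cma its physical address ...
 *	osl_sec_dma_free_mem_elem(osh, e);	// back onto osh->sec_list_2048
 */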

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;

	/* Guard against an empty in-use list before dereferencing the head. */
	if (sec_mem_elem == NULL)
		return NULL;

	if (sec_mem_elem->dma_handle == dma_handle) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
			ptr_cma_info->sec_alloc_list_tail = NULL;
			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		}

		return sec_mem_elem;
	}

	while (sec_mem_elem != NULL) {

		if (sec_mem_elem->dma_handle == dma_handle) {

			sec_prv_elem->next = sec_mem_elem->next;
			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;

			return sec_mem_elem;
		}
		sec_prv_elem = sec_mem_elem;
		sec_mem_elem = sec_mem_elem->next;
	}
	return NULL;
}

static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;

	if (sec_mem_elem) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (ptr_cma_info->sec_alloc_list == NULL)
			ptr_cma_info->sec_alloc_list_tail = NULL;

		return sec_mem_elem;

	} else
		return NULL;
}

static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	return ptr_cma_info->sec_alloc_list_tail;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	uint loffset;
	void *vaorig = va + size;
	dma_addr_t dma_handle = 0x0;

	/* The packet should be the one added with osl_sec_dma_map() just before
	 * this call, so its element sits at the tail of the in-use list.
	 */
	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

	if (sec_mem_elem && sec_mem_elem->va == vaorig) {

		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

		dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size,
			(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

	} else {
		printf("%s: error, original va not found, va = 0x%p\n",
			__FUNCTION__, vaorig);
	}
	return dma_handle;
}
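
/*
 * Pairing sketch (illustrative): the tx metadata must immediately precede the
 * payload that was just mapped, since this function finds the payload's
 * element at the in-use list tail and checks that va + size matches the
 * payload VA. 'buf', 'meta_len', 'ci', and 'dmah' are hypothetical
 * caller-owned names:
 *
 *	pa = osl_sec_dma_map(osh, buf, len, DMA_TX, pkt, dmah, ci, 0);
 *	meta_pa = osl_sec_dma_map_txmeta(osh, buf - meta_len, meta_len,
 *		DMA_TX, pkt, dmah, ci);
 */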

dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
{

	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;
	uint8 *fragva;
	uint buflen = 0;
	struct sk_buff *skb;
	dma_addr_t dma_handle = 0x0;
	uint loffset;
	int i = 0;

	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);

	if (sec_mem_elem == NULL) {
		printk("linux_osl.c: osl_sec_dma_map - cma allocation failed\n");
		return 0;
	}
	sec_mem_elem->va = va;
	sec_mem_elem->direction = direction;
	pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);

	/* Offset of the bounce buffer within its page; equivalent to
	 * sec_mem_elem->pa_cma & (PAGE_SIZE-1).
	 */
	loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
	 * pa_cma_kmap_va += loffset;
	 */

	pa_cma_kmap_va = sec_mem_elem->vac;

	if (direction == DMA_TX) {

		if (p == NULL) {

			memcpy(pa_cma_kmap_va+offset, va, size);
			buflen = size;
		}
		else {
			/* Flatten the (possibly chained, possibly nonlinear)
			 * packet into the contiguous bounce buffer.
			 */
			for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
				if (skb_is_nonlinear(skb)) {

					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						/* fragva must be byte-addressed; with the
						 * original 'int *' type the page_offset
						 * arithmetic would be scaled by 4.
						 */
						fragva = kmap_atomic(skb_frag_page(f));
						memcpy((pa_cma_kmap_va+offset+buflen),
							(fragva + f->page_offset), skb_frag_size(f));
						kunmap_atomic(fragva);
						buflen += skb_frag_size(f);
					}
				}
				else {
					memcpy((pa_cma_kmap_va+offset+buflen), skb->data, skb->len);
					buflen += skb->len;
				}
			}

		}
		if (dmah) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
	}

	else if (direction == DMA_RX)
	{
		buflen = size;
		if ((p != NULL) && (dmah != NULL)) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
	}
	if (direction == DMA_RX || direction == DMA_TX) {

		dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset+offset, buflen,
			(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

	}
	if (dmah) {
		dmah->segs[0].addr = dma_handle;
		dmah->segs[0].length = buflen;
	}
	sec_mem_elem->dma_handle = dma_handle;
	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
	return dma_handle;
}
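
/*
 * TX data path in brief: the payload is copied into the secure CMA bounce
 * buffer and the *bounce* page is what gets DMA-mapped, so the device never
 * touches the original skb memory. A minimal caller sketch (illustrative;
 * 'ci' and 'dmah' are caller-owned, and PKTDATA/PKTLEN are the OSL packet
 * accessors):
 *
 *	dma_addr_t pa = osl_sec_dma_map(osh, PKTDATA(osh, pkt), PKTLEN(osh, pkt),
 *		DMA_TX, pkt, dmah, ci, 0);
 *	if (pa == 0)
 *		... allocation from the element pools failed; drop or retry ...
 */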

dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
{

	struct page *pa_cma_page;
	phys_addr_t pa_cma;
	dma_addr_t dma_handle = 0x0;
	uint loffset;

	pa_cma = (phys_addr_t)(va - osh->contig_delta_va_pa);
	pa_cma_page = phys_to_page(pa_cma);
	loffset = pa_cma - (pa_cma & ~(PAGE_SIZE-1));

	dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size,
		(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

	return dma_handle;

}
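
/*
 * Worked detail: the loffset expression above is the offset of pa_cma within
 * its page, since for a power-of-two PAGE_SIZE
 *
 *	pa - (pa & ~(PAGE_SIZE-1)) == pa & (PAGE_SIZE-1)
 *
 * e.g. with PAGE_SIZE 4096 and pa 0x10000a10, the page base is 0x10000000
 * and loffset is 0xa10.
 */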

void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t pa_cma;
	void *va;
	uint loffset = 0;
	int read_count = 0;
	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	if (sec_mem_elem == NULL) {
		printf("%s: no element found for dma_handle=0x%lx, dir=%d\n",
			__FUNCTION__, (ulong)dma_handle, direction);
		return;
	}

	va = sec_mem_elem->va;
	va -= offset;
	pa_cma = sec_mem_elem->pa_cma;

	pa_cma_page = phys_to_page(pa_cma);
	loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

	if (direction == DMA_RX) {

		if (p == NULL) {

			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
			 * pa_cma_kmap_va += loffset;
			 */

			pa_cma_kmap_va = sec_mem_elem->vac;

			/* Unmap first, then copy the received data out of the
			 * bounce buffer into the caller's buffer.
			 */
			dma_unmap_page(osh->cma->dev, pa_cma, size, DMA_FROM_DEVICE);
			memcpy(va, pa_cma_kmap_va, size);
			/* kunmap_atomic(pa_cma_kmap_va); */
		}
	} else {
		dma_unmap_page(osh->cma->dev, pa_cma, size+offset, DMA_TO_DEVICE);
	}

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}

void
osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{

	sec_mem_elem_t *sec_mem_elem;

	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);

	while (sec_mem_elem != NULL) {

		dma_unmap_page(osh->cma->dev, sec_mem_elem->pa_cma, sec_mem_elem->size,
			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);

		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
	}
}

static void
osl_sec_dma_init_consistent(osl_t *osh)
{
	int i;
	void *temp_va = osh->contig_base_alloc_coherent_va;
	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		osh->sec_cma_coherent[i].avail = TRUE;
		osh->sec_cma_coherent[i].va = temp_va;
		osh->sec_cma_coherent[i].pa = temp_pa;
		temp_va += SEC_CMA_COHERENT_BLK;
		temp_pa += SEC_CMA_COHERENT_BLK;
	}
}
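
/*
 * The coherent pool is a fixed table of SEC_CMA_COHERENT_MAX blocks, each
 * SEC_CMA_COHERENT_BLK bytes, pre-carved from the contiguous region. After
 * the loop above, block i sits at (illustrative):
 *
 *	va_i = contig_base_alloc_coherent_va + i * SEC_CMA_COHERENT_BLK;
 *	pa_i = contig_base_alloc_coherent    + i * SEC_CMA_COHERENT_BLK;
 */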

static void *
osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
{

	void *temp_va = NULL;
	ulong temp_pa = 0;
	int i;

	if (size > SEC_CMA_COHERENT_BLK) {
		printf("%s unsupported size\n", __FUNCTION__);
		return NULL;
	}

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].avail == TRUE) {
			temp_va = osh->sec_cma_coherent[i].va;
			temp_pa = osh->sec_cma_coherent[i].pa;
			osh->sec_cma_coherent[i].avail = FALSE;
			break;
		}
	}

	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			temp_va, (ulong)temp_pa, size);

	*pap = (unsigned long)temp_pa;
	return temp_va;
}
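
/*
 * Allocation is a linear first-fit scan over the block table, so it is O(n)
 * in SEC_CMA_COHERENT_MAX, any request up to one block consumes a whole
 * block, and align_bits is ignored. A minimal caller sketch (illustrative;
 * 'ringsz' is a hypothetical size <= SEC_CMA_COHERENT_BLK):
 *
 *	ulong pa;
 *	void *va = osl_sec_dma_alloc_consistent(osh, ringsz, 0, &pa);
 *	if (va != NULL) {
 *		... use va/pa, e.g. for a descriptor ring ...
 *		osl_sec_dma_free_consistent(osh, va, ringsz, pa);
 *	}
 */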

static void
osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
	int i = 0;

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].va == va) {
			osh->sec_cma_coherent[i].avail = TRUE;
			break;
		}
	}
	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			va, (ulong)pa, size);
}

void
osl_sec_cma_baseaddr_memsize(osl_t *osh, dma_addr_t *cma_baseaddr, uint32 *cma_memsize)
{
	*cma_baseaddr = osh->contig_base;
	*cma_memsize = CMA_MEMBLOCK;
}

#endif /* BCM_SECURE_DMA */

int dhd_mmc_power_save_host(struct mmc_host *host)
{
	if (!host) {
		printf("%s: host is NULL\n", __FUNCTION__);
		return -EINVAL;
	}

	if (host->ios.power_mode == MMC_POWER_OFF)
		return 0;

	return mmc_power_save_host(host);
}

int dhd_mmc_power_restore_host(struct mmc_host *host)
{
	if (!host) {
		printf("%s: host is NULL\n", __FUNCTION__);
		return -EINVAL;
	}

	if (host->ios.power_mode == MMC_POWER_ON)
		return 0;

	return mmc_power_restore_host(host);
}
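
/*
 * These wrappers make the MMC power transitions idempotent: saving an
 * already-off host or restoring an already-on host is a no-op success.
 * Illustrative suspend/resume pairing ('host' reached from the SDIO function
 * the driver is bound to, e.g. sdio_func->card->host):
 *
 *	dhd_mmc_power_save_host(host);		// on suspend
 *	...
 *	dhd_mmc_power_restore_host(host);	// on resume
 */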