1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Linux OS Independent Layer
4  *
5  * Copyright (C) 1999-2017, Broadcom Corporation
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: linux_osl.c 680580 2017-01-20 11:49:58Z $
29  */
30 
31 #define LINUX_PORT
32 
33 #include <typedefs.h>
34 #include <bcmendian.h>
35 #include <linuxver.h>
36 #include <bcmdefs.h>
37 
38 
39 #if !defined(STBLINUX)
40 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
41 #include <asm/cacheflush.h>
42 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
43 #endif /* STBLINUX */
44 
45 #include <linux/random.h>
46 
47 #include <osl.h>
48 #include <bcmutils.h>
49 #include <linux/delay.h>
50 #include <linux/vmalloc.h>
51 #include <pcicfg.h>
52 #include <dngl_stats.h>
53 #include <dhd.h>
54 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
55 #include <asm-generic/pci-dma-compat.h>
56 #endif
57 
58 
59 #ifdef BCM_SECURE_DMA
60 #include <linux/module.h>
61 #include <linux/kernel.h>
62 #include <linux/io.h>
63 #include <linux/printk.h>
64 #include <linux/errno.h>
65 #include <linux/mm.h>
66 #include <linux/moduleparam.h>
67 #include <asm/io.h>
68 #include <linux/skbuff.h>
69 #include <stbutils.h>
70 #include <linux/highmem.h>
71 #include <linux/dma-mapping.h>
72 #include <asm/memory.h>
73 #endif /* BCM_SECURE_DMA */
74 
75 #include <linux/fs.h>
76 
77 #if defined(STB)
78 #include <linux/spinlock.h>
79 extern spinlock_t l2x0_reg_lock;
80 #endif
81 
82 #ifdef BCM_OBJECT_TRACE
83 #include <bcmutils.h>
84 #endif /* BCM_OBJECT_TRACE */
85 
86 #define PCI_CFG_RETRY		10
87 
88 #define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
89 #define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
90 #define DUMPBUFSZ 1024
91 
92 /* dependency check */
93 #if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
94 #error "DHD_USE_STATIC_CTRLBUF is supported on PCIE targets only"
95 #endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
96 
97 #ifdef CONFIG_DHD_USE_STATIC_BUF
98 #ifdef DHD_USE_STATIC_CTRLBUF
99 #define DHD_SKB_1PAGE_BUFSIZE	(PAGE_SIZE*1)
100 #define DHD_SKB_2PAGE_BUFSIZE	(PAGE_SIZE*2)
101 #define DHD_SKB_4PAGE_BUFSIZE	(PAGE_SIZE*4)
102 
103 #define PREALLOC_FREE_MAGIC	0xFEDC
104 #define PREALLOC_USED_MAGIC	0xFCDE
105 #else
106 #define DHD_SKB_HDRSIZE		336
107 #define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
108 #define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
109 #define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
110 #endif /* DHD_USE_STATIC_CTRLBUF */
111 
112 #define STATIC_BUF_MAX_NUM	16
113 #define STATIC_BUF_SIZE	(PAGE_SIZE*2)
114 #define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
115 
116 typedef struct bcm_static_buf {
117 	spinlock_t static_lock;
118 	unsigned char *buf_ptr;
119 	unsigned char buf_use[STATIC_BUF_MAX_NUM];
120 } bcm_static_buf_t;
121 
122 static bcm_static_buf_t *bcm_static_buf = 0;
123 
124 #ifdef DHD_USE_STATIC_CTRLBUF
125 #define STATIC_PKT_4PAGE_NUM	0
126 #define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
127 #elif defined(ENHANCED_STATIC_BUF)
128 #define STATIC_PKT_4PAGE_NUM	1
129 #define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
130 #else
131 #define STATIC_PKT_4PAGE_NUM	0
132 #define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
133 #endif /* DHD_USE_STATIC_CTRLBUF */
134 
135 #ifdef DHD_USE_STATIC_CTRLBUF
136 #define STATIC_PKT_1PAGE_NUM	0
137 #define STATIC_PKT_2PAGE_NUM	128
138 #else
139 #define STATIC_PKT_1PAGE_NUM	8
140 #define STATIC_PKT_2PAGE_NUM	8
141 #endif /* DHD_USE_STATIC_CTRLBUF */
142 
143 #define STATIC_PKT_1_2PAGE_NUM	\
144 	((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
145 #define STATIC_PKT_MAX_NUM	\
146 	((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
147 
148 typedef struct bcm_static_pkt {
149 #ifdef DHD_USE_STATIC_CTRLBUF
150 	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
151 	unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
152 	spinlock_t osl_pkt_lock;
153 	uint32 last_allocated_index;
154 #else
155 	struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
156 	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
157 #ifdef ENHANCED_STATIC_BUF
158 	struct sk_buff *skb_16k;
159 #endif /* ENHANCED_STATIC_BUF */
160 	struct semaphore osl_pkt_sem;
161 #endif /* DHD_USE_STATIC_CTRLBUF */
162 	unsigned char pkt_use[STATIC_PKT_MAX_NUM];
163 } bcm_static_pkt_t;
164 
165 static bcm_static_pkt_t *bcm_static_skb = 0;
166 
167 void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
168 #endif /* CONFIG_DHD_USE_STATIC_BUF */
169 
170 typedef struct bcm_mem_link {
171 	struct bcm_mem_link *prev;
172 	struct bcm_mem_link *next;
173 	uint	size;
174 	int	line;
175 	void 	*osh;
176 	char	file[BCM_MEM_FILENAME_LEN];
177 } bcm_mem_link_t;
178 
179 struct osl_cmn_info {
180 	atomic_t malloced;
181 	atomic_t pktalloced;    /* Number of allocated packet buffers */
182 	spinlock_t dbgmem_lock;
183 	bcm_mem_link_t *dbgmem_list;
184 	bcm_mem_link_t *dbgvmem_list;
185 	spinlock_t pktalloc_lock;
186 	atomic_t refcount; /* Number of references to this shared structure. */
187 };
188 typedef struct osl_cmn_info osl_cmn_t;
189 
190 struct osl_info {
191 	osl_pubinfo_t pub;
192 	uint32  flags;		/* If specific cases to be handled in the OSL */
193 #ifdef CTFPOOL
194 	ctfpool_t *ctfpool;
195 #endif /* CTFPOOL */
196 	uint magic;
197 	void *pdev;
198 	uint failed;
199 	uint bustype;
200 	osl_cmn_t *cmn; /* Common OSL-related data shared between two OSHs */
201 
202 	void *bus_handle;
203 #ifdef BCMDBG_CTRACE
204 	spinlock_t ctrace_lock;
205 	struct list_head ctrace_list;
206 	int ctrace_num;
207 #endif /* BCMDBG_CTRACE */
208 #ifdef	BCM_SECURE_DMA
209 	struct sec_mem_elem *sec_list_4096;
210 	struct sec_mem_elem *sec_list_base_4096;
211 	phys_addr_t  contig_base;
212 	void *contig_base_va;
213 	phys_addr_t  contig_base_alloc;
214 	void *contig_base_alloc_va;
215 	phys_addr_t contig_base_alloc_coherent;
216 	void *contig_base_alloc_coherent_va;
217 	void *contig_base_coherent_va;
218 	void *contig_delta_va_pa;
219 	struct {
220 		phys_addr_t pa;
221 		void *va;
222 		bool avail;
223 	} sec_cma_coherent[SEC_CMA_COHERENT_MAX];
224 	int stb_ext_params;
225 #endif /* BCM_SECURE_DMA */
226 };
227 #ifdef BCM_SECURE_DMA
228 static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
229 	bool iscache, bool isdecr);
230 static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
231 static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
232 	sec_mem_elem_t **list);
233 static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
234 	void *sec_list_base);
235 static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
236 	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
237 static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
238 static void osl_sec_dma_init_consistent(osl_t *osh);
239 static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
240 	ulong *pap);
241 static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
242 #endif /* BCM_SECURE_DMA */
243 
244 #ifdef BCM_OBJECT_TRACE
245 /* don't clear the first 4 byte that is the pkt sn */
246 #define OSL_PKTTAG_CLEAR(p) \
247 do { \
248 	struct sk_buff *s = (struct sk_buff *)(p); \
249 	ASSERT(OSL_PKTTAG_SZ == 32); \
250 	*(uint32 *)(&s->cb[4]) = 0; \
251 	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
252 	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
253 	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
254 } while (0)
255 #else
256 #define OSL_PKTTAG_CLEAR(p) \
257 do { \
258 	struct sk_buff *s = (struct sk_buff *)(p); \
259 	ASSERT(OSL_PKTTAG_SZ == 32); \
260 	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
261 	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
262 	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
263 	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
264 } while (0)
265 #endif /* BCM_OBJECT_TRACE */
266 
267 /* PCMCIA attribute space access macros */
268 
269 uint32 g_assert_type = 0; /* By Default Kernel Panic */
270 
271 module_param(g_assert_type, int, 0);
272 #ifdef	BCM_SECURE_DMA
273 #define	SECDMA_MODULE_PARAMS	0
274 #define	SECDMA_EXT_FILE	1
275 unsigned long secdma_addr = 0;
276 unsigned long secdma_addr2 = 0;
277 u32 secdma_size = 0;
278 u32 secdma_size2 = 0;
279 module_param(secdma_addr, ulong, 0);
280 module_param(secdma_size, int, 0);
281 module_param(secdma_addr2, ulong, 0);
282 module_param(secdma_size2, int, 0);
283 static int secdma_found = 0;
284 #endif /* BCM_SECURE_DMA */
285 
286 static int16 linuxbcmerrormap[] =
287 {	0,				/* 0 */
288 	-EINVAL,		/* BCME_ERROR */
289 	-EINVAL,		/* BCME_BADARG */
290 	-EINVAL,		/* BCME_BADOPTION */
291 	-EINVAL,		/* BCME_NOTUP */
292 	-EINVAL,		/* BCME_NOTDOWN */
293 	-EINVAL,		/* BCME_NOTAP */
294 	-EINVAL,		/* BCME_NOTSTA */
295 	-EINVAL,		/* BCME_BADKEYIDX */
296 	-EINVAL,		/* BCME_RADIOOFF */
297 	-EINVAL,		/* BCME_NOTBANDLOCKED */
298 	-EINVAL, 		/* BCME_NOCLK */
299 	-EINVAL, 		/* BCME_BADRATESET */
300 	-EINVAL, 		/* BCME_BADBAND */
301 	-E2BIG,			/* BCME_BUFTOOSHORT */
302 	-E2BIG,			/* BCME_BUFTOOLONG */
303 	-EBUSY, 		/* BCME_BUSY */
304 	-EINVAL, 		/* BCME_NOTASSOCIATED */
305 	-EINVAL, 		/* BCME_BADSSIDLEN */
306 	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
307 	-EINVAL, 		/* BCME_BADCHAN */
308 	-EFAULT, 		/* BCME_BADADDR */
309 	-ENOMEM, 		/* BCME_NORESOURCE */
310 	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
311 	-EMSGSIZE,		/* BCME_BADLENGTH */
312 	-EINVAL,		/* BCME_NOTREADY */
313 	-EPERM,			/* BCME_EPERM */
314 	-ENOMEM, 		/* BCME_NOMEM */
315 	-EINVAL, 		/* BCME_ASSOCIATED */
316 	-ERANGE, 		/* BCME_RANGE */
317 	-EINVAL, 		/* BCME_NOTFOUND */
318 	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
319 	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
320 	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
321 	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
322 	-EIO,			/* BCME_SDIO_ERROR */
323 	-ENODEV,		/* BCME_DONGLE_DOWN */
324 	-EINVAL,		/* BCME_VERSION */
325 	-EIO,			/* BCME_TXFAIL */
326 	-EIO,			/* BCME_RXFAIL */
327 	-ENODEV,		/* BCME_NODEVICE */
328 	-EINVAL,		/* BCME_NMODE_DISABLED */
329 	-ENODATA,		/* BCME_NONRESIDENT */
330 	-EINVAL,		/* BCME_SCANREJECT */
331 	-EINVAL,		/* BCME_USAGE_ERROR */
332 	-EIO,     		/* BCME_IOCTL_ERROR */
333 	-EIO,			/* BCME_SERIAL_PORT_ERR */
334 	-EOPNOTSUPP,	/* BCME_DISABLED, BCME_NOTENABLED */
335 	-EIO,			/* BCME_DECERR */
336 	-EIO,			/* BCME_ENCERR */
337 	-EIO,			/* BCME_MICERR */
338 	-ERANGE,		/* BCME_REPLAY */
339 	-EINVAL,		/* BCME_IE_NOTFOUND */
340 	-EINVAL,		/* BCME_DATA_NOTFOUND */
341 	-EINVAL,        /* BCME_NOT_GC */
342 	-EINVAL,        /* BCME_PRS_REQ_FAILED */
343 	-EINVAL,        /* BCME_NO_P2P_SE */
344 	-EINVAL,        /* BCME_NOA_PND */
345 	-EINVAL,        /* BCME_FRAG_Q_FAILED */
346 	-EINVAL,        /* BCME_GET_AF_FAILED */
347 	-EINVAL,		/* BCME_MSCH_NOTREADY */
348 
349 /* When a new error code is added to bcmutils.h, add the OS-
350  * specific error translation here as well
351  */
352 /* check if BCME_LAST changed since the last time this function was updated */
353 #if BCME_LAST != -60
354 #error "You need to add a OS error translation in the linuxbcmerrormap \
355 	for new error code defined in bcmutils.h"
356 #endif
357 };
358 uint lmtest = FALSE;
359 
360 /* translate bcmerrors into linux errors */
361 int
362 osl_error(int bcmerror)
363 {
364 	if (bcmerror > 0)
365 		bcmerror = 0;
366 	else if (bcmerror < BCME_LAST)
367 		bcmerror = BCME_ERROR;
368 
369 	/* Array bounds covered by ASSERT in osl_attach */
370 	return linuxbcmerrormap[-bcmerror];
371 }
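/*
 * Usage sketch (illustrative, not compiled): callers translate driver
 * BCME_* status codes at the OS boundary so user space sees ordinary
 * negative errno values. The mappings follow linuxbcmerrormap above.
 */
#if 0
	int bcm_status = BCME_NOMEM;              /* driver-internal code */
	int os_status = osl_error(bcm_status);    /* -> -ENOMEM           */
	int ok_status = osl_error(0);             /* success (0) -> 0     */
#endif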
372 
373 osl_t *
374 #ifdef SHARED_OSL_CMN
375 osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
376 #else
377 osl_attach(void *pdev, uint bustype, bool pkttag)
378 #endif /* SHARED_OSL_CMN */
379 {
380 #ifndef SHARED_OSL_CMN
381 	void **osl_cmn = NULL;
382 #endif /* SHARED_OSL_CMN */
383 	osl_t *osh;
384 	gfp_t flags;
385 #ifdef BCM_SECURE_DMA
386 	u32 secdma_memsize;
387 #endif
388 
389 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
390 	if (!(osh = kmalloc(sizeof(osl_t), flags)))
391 		return osh;
392 
393 	ASSERT(osh);
394 
395 	bzero(osh, sizeof(osl_t));
396 
397 	if (osl_cmn == NULL || *osl_cmn == NULL) {
398 		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
399 			kfree(osh);
400 			return NULL;
401 		}
402 		bzero(osh->cmn, sizeof(osl_cmn_t));
403 		if (osl_cmn)
404 			*osl_cmn = osh->cmn;
405 		atomic_set(&osh->cmn->malloced, 0);
406 		osh->cmn->dbgmem_list = NULL;
407 		spin_lock_init(&(osh->cmn->dbgmem_lock));
408 
409 		spin_lock_init(&(osh->cmn->pktalloc_lock));
410 
411 	} else {
412 		osh->cmn = *osl_cmn;
413 	}
414 	atomic_add(1, &osh->cmn->refcount);
415 
416 	bcm_object_trace_init();
417 
418 	/* Check that error map has the right number of entries in it */
419 	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
420 
421 	osh->failed = 0;
422 	osh->pdev = pdev;
423 	osh->pub.pkttag = pkttag;
424 	osh->bustype = bustype;
425 	osh->magic = OS_HANDLE_MAGIC;
426 #ifdef BCM_SECURE_DMA
427 
428 	if ((secdma_addr != 0) && (secdma_size != 0)) {
429 		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
430 		if (secdma_found == 0) {
431 			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
432 			secdma_memsize = secdma_size;
433 		} else if (secdma_found == 1) {
434 			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
435 			secdma_memsize = secdma_size2;
436 		} else {
437 			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
438 			kfree(osh);
439 			return NULL;
440 		}
441 		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
442 		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
443 		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
444 			(unsigned int)osh->contig_base_alloc);
445 		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
446 	}
447 	else if (stbpriv_init(osh) == 0) {
448 		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
449 		if (secdma_found == 0) {
450 			osh->contig_base_alloc =
451 				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
452 			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
453 		} else if (secdma_found == 1) {
454 			osh->contig_base_alloc =
455 				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
456 			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
457 		} else {
458 			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
459 			kfree(osh);
460 			return NULL;
461 		}
462 		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
463 		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
464 		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
465 			(unsigned int)osh->contig_base_alloc);
466 		osh->stb_ext_params = SECDMA_EXT_FILE;
467 	}
468 	else {
469 		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
470 		kfree(osh);
471 		return NULL;
472 	}
473 	secdma_found++;
474 	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
475 		phys_to_page((u32)osh->contig_base_alloc),
476 		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);
477 
478 	if (osh->contig_base_alloc_coherent_va == NULL) {
479 		if (osh->cmn)
480 			kfree(osh->cmn);
481 	    kfree(osh);
482 	    return NULL;
483 	}
484 	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
485 	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
486 	osl_sec_dma_init_consistent(osh);
487 
488 	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;
489 
490 	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
491 		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
492 	if (osh->contig_base_alloc_va == NULL) {
493 		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
494 		if (osh->cmn)
495 			kfree(osh->cmn);
496 		kfree(osh);
497 		return NULL;
498 	}
499 	osh->contig_base_va = osh->contig_base_alloc_va;
500 
501 	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
502 		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
503 	    osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
504 	    osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
505 		if (osh->cmn)
506 			kfree(osh->cmn);
507 		kfree(osh);
508 		return NULL;
509 	}
510 	osh->sec_list_base_4096 = osh->sec_list_4096;
511 
512 #endif /* BCM_SECURE_DMA */
513 
514 	switch (bustype) {
515 		case PCI_BUS:
516 		case SI_BUS:
517 		case PCMCIA_BUS:
518 			osh->pub.mmbus = TRUE;
519 			break;
520 		case JTAG_BUS:
521 		case SDIO_BUS:
522 		case USB_BUS:
523 		case SPI_BUS:
524 		case RPC_BUS:
525 			osh->pub.mmbus = FALSE;
526 			break;
527 		default:
528 			ASSERT(FALSE);
529 			break;
530 	}
531 
532 #ifdef BCMDBG_CTRACE
533 	spin_lock_init(&osh->ctrace_lock);
534 	INIT_LIST_HEAD(&osh->ctrace_list);
535 	osh->ctrace_num = 0;
536 #endif /* BCMDBG_CTRACE */
537 
538 
539 	return osh;
540 }
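/*
 * Usage sketch (illustrative, not compiled): a bus probe routine pairs
 * osl_attach() with osl_detach(). The probe function is hypothetical;
 * this shows the non-SHARED_OSL_CMN signature, where no osl_cmn handle
 * is passed in.
 */
#if 0
static int example_probe(struct pci_dev *pdev)	/* hypothetical caller */
{
	osl_t *osh = osl_attach(pdev, PCI_BUS, TRUE /* pkttag */);
	if (osh == NULL)
		return -ENOMEM;
	/* ... use osh with the OSL APIs below ... */
	osl_detach(osh);
	return 0;
}
#endif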
541 
542 int osl_static_mem_init(osl_t *osh, void *adapter)
543 {
544 #ifdef CONFIG_DHD_USE_STATIC_BUF
545 		if (!bcm_static_buf && adapter) {
546 			if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
547 				DHD_PREALLOC_OSL_BUF, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
548 				printk("can not alloc static buf!\n");
549 				bcm_static_skb = NULL;
550 				ASSERT(osh->magic == OS_HANDLE_MAGIC);
551 				return -ENOMEM;
552 			} else {
553 				printk("alloc static buf at %p!\n", bcm_static_buf);
554 			}
555 
556 			spin_lock_init(&bcm_static_buf->static_lock);
557 
558 			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
559 		}
560 
561 #if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
562 		if (!bcm_static_skb && adapter) {
563 			int i;
564 			void *skb_buff_ptr = 0;
565 			bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
566 			skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0);
567 			if (!skb_buff_ptr) {
568 				printk("cannot alloc static buf!\n");
569 				bcm_static_buf = NULL;
570 				bcm_static_skb = NULL;
571 				ASSERT(osh->magic == OS_HANDLE_MAGIC);
572 				return -ENOMEM;
573 			}
574 
575 			bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
576 				(STATIC_PKT_MAX_NUM));
577 			for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
578 				bcm_static_skb->pkt_use[i] = 0;
579 			}
580 
581 #ifdef DHD_USE_STATIC_CTRLBUF
582 			spin_lock_init(&bcm_static_skb->osl_pkt_lock);
583 			bcm_static_skb->last_allocated_index = 0;
584 #else
585 			sema_init(&bcm_static_skb->osl_pkt_sem, 1);
586 #endif /* DHD_USE_STATIC_CTRLBUF */
587 		}
588 #endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
589 #endif /* CONFIG_DHD_USE_STATIC_BUF */
590 
591 	return 0;
592 }
593 
594 void osl_set_bus_handle(osl_t *osh, void *bus_handle)
595 {
596 	osh->bus_handle = bus_handle;
597 }
598 
599 void* osl_get_bus_handle(osl_t *osh)
600 {
601 	return osh->bus_handle;
602 }
603 
604 void
605 osl_detach(osl_t *osh)
606 {
607 	if (osh == NULL)
608 		return;
609 
610 #ifdef BCM_SECURE_DMA
611 	if (osh->stb_ext_params == SECDMA_EXT_FILE)
612 		stbpriv_exit(osh);
613 	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
614 	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
615 	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
616 	secdma_found--;
617 #endif /* BCM_SECURE_DMA */
618 
619 
620 	bcm_object_trace_deinit();
621 
622 	ASSERT(osh->magic == OS_HANDLE_MAGIC);
623 	atomic_sub(1, &osh->cmn->refcount);
624 	if (atomic_read(&osh->cmn->refcount) == 0) {
625 			kfree(osh->cmn);
626 	}
627 	kfree(osh);
628 }
629 
630 int osl_static_mem_deinit(osl_t *osh, void *adapter)
631 {
632 #ifdef CONFIG_DHD_USE_STATIC_BUF
633 	if (bcm_static_buf) {
634 		bcm_static_buf = 0;
635 	}
636 #ifdef BCMSDIO
637 	if (bcm_static_skb) {
638 		bcm_static_skb = 0;
639 	}
640 #endif /* BCMSDIO */
641 #endif /* CONFIG_DHD_USE_STATIC_BUF */
642 	return 0;
643 }
644 
645 /* APIs to set/get specific quirks in OSL layer */
646 void BCMFASTPATH
647 osl_flag_set(osl_t *osh, uint32 mask)
648 {
649 	osh->flags |= mask;
650 }
651 
652 void
653 osl_flag_clr(osl_t *osh, uint32 mask)
654 {
655 	osh->flags &= ~mask;
656 }
657 
658 #if defined(STB)
659 inline bool BCMFASTPATH
660 #else
661 bool
662 #endif
663 osl_is_flag_set(osl_t *osh, uint32 mask)
664 {
665 	return (osh->flags & mask);
666 }
667 
668 
669 #if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
670 
671 inline int BCMFASTPATH
672 osl_arch_is_coherent(void)
673 {
674 	return 0;
675 }
676 
677 inline int BCMFASTPATH
678 osl_acp_war_enab(void)
679 {
680 	return 0;
681 }
682 
683 inline void BCMFASTPATH
684 osl_cache_flush(void *va, uint size)
685 {
686 
687 	if (size > 0)
688 		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TO_DEVICE);
689 }
690 
691 inline void BCMFASTPATH
692 osl_cache_inv(void *va, uint size)
693 {
694 
695 	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
696 }
697 
698 inline void BCMFASTPATH
699 osl_prefetch(const void *ptr)
700 {
701 	__asm__ __volatile__("pld\t%0" :: "o"(*(char *)ptr) : "cc");
702 }
703 
704 #endif
705 
706 /*
707  * To avoid ACP latency, a fwder buf will be sent directly to DDR using
708  * DDR aliasing into non-ACP address space. Such Fwder buffers must be
709  * explicitly managed from a coherency perspective.
710  */
711 static inline void BCMFASTPATH
712 osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
713 {
714 }
715 
716 static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
717 {
718 	struct sk_buff *skb;
719 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
720 	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
721 #ifdef DHD_USE_ATOMIC_PKTGET
722 	flags = GFP_ATOMIC;
723 #endif /* DHD_USE_ATOMIC_PKTGET */
724 	skb = __dev_alloc_skb(len, flags);
725 #else
726 	skb = dev_alloc_skb(len);
727 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
728 	return skb;
729 }
730 
731 #ifdef CTFPOOL
732 
733 #ifdef CTFPOOL_SPINLOCK
734 #define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
735 #define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
736 #else
737 #define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
738 #define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
739 #endif /* CTFPOOL_SPINLOCK */
740 /*
741  * Allocate and add an object to packet pool.
742  */
743 void *
744 osl_ctfpool_add(osl_t *osh)
745 {
746 	struct sk_buff *skb;
747 #ifdef CTFPOOL_SPINLOCK
748 	unsigned long flags;
749 #endif /* CTFPOOL_SPINLOCK */
750 
751 	if ((osh == NULL) || (osh->ctfpool == NULL))
752 		return NULL;
753 
754 	CTFPOOL_LOCK(osh->ctfpool, flags);
755 	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
756 
757 	/* No need to allocate more objects */
758 	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
759 		CTFPOOL_UNLOCK(osh->ctfpool, flags);
760 		return NULL;
761 	}
762 
763 	/* Allocate a new skb and add it to the ctfpool */
764 	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
765 	if (skb == NULL) {
766 		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
767 		       osh->ctfpool->obj_size);
768 		CTFPOOL_UNLOCK(osh->ctfpool, flags);
769 		return NULL;
770 	}
771 
772 	/* Add to ctfpool */
773 	skb->next = (struct sk_buff *)osh->ctfpool->head;
774 	osh->ctfpool->head = skb;
775 	osh->ctfpool->fast_frees++;
776 	osh->ctfpool->curr_obj++;
777 
778 	/* Hijack a skb member to store ptr to ctfpool */
779 	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
780 
781 	/* Use bit flag to indicate skb from fast ctfpool */
782 	PKTFAST(osh, skb) = FASTBUF;
783 
784 	/* If ctfpool's osh is a fwder osh, reset the fwder buf */
785 	osl_fwderbuf_reset(osh->ctfpool->osh, skb);
786 
787 	CTFPOOL_UNLOCK(osh->ctfpool, flags);
788 
789 	return skb;
790 }
791 
792 /*
793  * Add new objects to the pool.
794  */
795 void
796 osl_ctfpool_replenish(osl_t *osh, uint thresh)
797 {
798 	if ((osh == NULL) || (osh->ctfpool == NULL))
799 		return;
800 
801 	/* Do nothing if no refills are required */
802 	while ((osh->ctfpool->refills > 0) && (thresh--)) {
803 		osl_ctfpool_add(osh);
804 		osh->ctfpool->refills--;
805 	}
806 }
807 
808 /*
809  * Initialize the packet pool with specified number of objects.
810  */
811 int32
812 osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
813 {
814 	gfp_t flags;
815 
816 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
817 	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
818 	ASSERT(osh->ctfpool);
819 
820 	osh->ctfpool->osh = osh;
821 
822 	osh->ctfpool->max_obj = numobj;
823 	osh->ctfpool->obj_size = size;
824 
825 	spin_lock_init(&osh->ctfpool->lock);
826 
827 	while (numobj--) {
828 		if (!osl_ctfpool_add(osh))
829 			return -1;
830 		osh->ctfpool->fast_frees--;
831 	}
832 
833 	return 0;
834 }
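/*
 * Usage sketch (illustrative, CTFPOOL builds only): size the pool once
 * at init; osl_pktget() then draws from it via osl_pktfastget() and the
 * driver tops it up with osl_ctfpool_replenish(). The object count and
 * size below are example values, not taken from this file.
 */
#if 0
	if (osl_ctfpool_init(osh, 512 /* objects */, 2048 /* bytes each */) != 0)
		goto fail;			/* partial pool: caller cleans up */
	osl_ctfpool_replenish(osh, 16);		/* perform at most 16 refills */
	osl_ctfpool_cleanup(osh);		/* at detach: frees pooled skbs */
#endif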
835 
836 /*
837  * Cleanup the packet pool objects.
838  */
839 void
840 osl_ctfpool_cleanup(osl_t *osh)
841 {
842 	struct sk_buff *skb, *nskb;
843 #ifdef CTFPOOL_SPINLOCK
844 	unsigned long flags;
845 #endif /* CTFPOOL_SPINLOCK */
846 
847 	if ((osh == NULL) || (osh->ctfpool == NULL))
848 		return;
849 
850 	CTFPOOL_LOCK(osh->ctfpool, flags);
851 
852 	skb = osh->ctfpool->head;
853 
854 	while (skb != NULL) {
855 		nskb = skb->next;
856 		dev_kfree_skb(skb);
857 		skb = nskb;
858 		osh->ctfpool->curr_obj--;
859 	}
860 
861 	ASSERT(osh->ctfpool->curr_obj == 0);
862 	osh->ctfpool->head = NULL;
863 	CTFPOOL_UNLOCK(osh->ctfpool, flags);
864 
865 	kfree(osh->ctfpool);
866 	osh->ctfpool = NULL;
867 }
868 
869 void
870 osl_ctfpool_stats(osl_t *osh, void *b)
871 {
872 	struct bcmstrbuf *bb;
873 
874 	if ((osh == NULL) || (osh->ctfpool == NULL))
875 		return;
876 
877 #ifdef CONFIG_DHD_USE_STATIC_BUF
878 	if (bcm_static_buf) {
879 		bcm_static_buf = 0;
880 	}
881 #ifdef BCMSDIO
882 	if (bcm_static_skb) {
883 		bcm_static_skb = 0;
884 	}
885 #endif /* BCMSDIO */
886 #endif /* CONFIG_DHD_USE_STATIC_BUF */
887 
888 	bb = b;
889 
890 	ASSERT((osh != NULL) && (bb != NULL));
891 
892 	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
893 	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
894 	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
895 	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
896 	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
897 	            osh->ctfpool->slow_allocs);
898 }
899 
900 static inline struct sk_buff *
901 osl_pktfastget(osl_t *osh, uint len)
902 {
903 	struct sk_buff *skb;
904 #ifdef CTFPOOL_SPINLOCK
905 	unsigned long flags;
906 #endif /* CTFPOOL_SPINLOCK */
907 
908 	/* Try a fast allocation. Return NULL if the ctfpool is not in use
909 	 * or if there are no items in the ctfpool.
910 	 */
911 	if (osh->ctfpool == NULL)
912 		return NULL;
913 
914 	CTFPOOL_LOCK(osh->ctfpool, flags);
915 	if (osh->ctfpool->head == NULL) {
916 		ASSERT(osh->ctfpool->curr_obj == 0);
917 		osh->ctfpool->slow_allocs++;
918 		CTFPOOL_UNLOCK(osh->ctfpool, flags);
919 		return NULL;
920 	}
921 
922 	if (len > osh->ctfpool->obj_size) {
923 		CTFPOOL_UNLOCK(osh->ctfpool, flags);
924 		return NULL;
925 	}
926 
927 	ASSERT(len <= osh->ctfpool->obj_size);
928 
929 	/* Get an object from ctfpool */
930 	skb = (struct sk_buff *)osh->ctfpool->head;
931 	osh->ctfpool->head = (void *)skb->next;
932 
933 	osh->ctfpool->fast_allocs++;
934 	osh->ctfpool->curr_obj--;
935 	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
936 	CTFPOOL_UNLOCK(osh->ctfpool, flags);
937 
938 	/* Init skb struct */
939 	skb->next = skb->prev = NULL;
940 #if defined(__ARM_ARCH_7A__)
941 	skb->data = skb->head + NET_SKB_PAD;
942 	skb->tail = skb->head + NET_SKB_PAD;
943 #else
944 	skb->data = skb->head + 16;
945 	skb->tail = skb->head + 16;
946 #endif /* __ARM_ARCH_7A__ */
947 	skb->len = 0;
948 	skb->cloned = 0;
949 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
950 	skb->list = NULL;
951 #endif
952 	atomic_set(&skb->users, 1);
953 
954 	PKTSETCLINK(skb, NULL);
955 	PKTCCLRATTR(skb);
956 	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
957 
958 	return skb;
959 }
960 #endif /* CTFPOOL */
961 
962 #if defined(BCM_GMAC3)
963 /* Account for a packet delivered to downstream forwarder.
964  * Decrement a GMAC forwarder interface's pktalloced count.
965  */
966 void BCMFASTPATH
967 osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt)
968 {
969 
970 	atomic_sub(skb_cnt, &osh->cmn->pktalloced);
971 }
972 
973 /* Account for a downstream forwarder delivered packet to a WL/DHD driver.
974  * Increment a GMAC forwarder interface's pktalloced count.
975  */
976 void BCMFASTPATH
977 #ifdef BCMDBG_CTRACE
978 osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file)
979 #else
980 osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
981 #endif /* BCMDBG_CTRACE */
982 {
983 #if defined(BCMDBG_CTRACE)
984 	int i;
985 	struct sk_buff *skb;
986 #endif
987 
988 #if defined(BCMDBG_CTRACE)
989 	if (skb_cnt > 1) {
990 		struct sk_buff **skb_array = (struct sk_buff **)skbs;
991 		for (i = 0; i < skb_cnt; i++) {
992 			skb = skb_array[i];
993 #if defined(BCMDBG_CTRACE)
994 			ASSERT(!PKTISCHAINED(skb));
995 			ADD_CTRACE(osh, skb, file, line);
996 #endif /* BCMDBG_CTRACE */
997 		}
998 	} else {
999 		skb = (struct sk_buff *)skbs;
1000 #if defined(BCMDBG_CTRACE)
1001 		ASSERT(!PKTISCHAINED(skb));
1002 		ADD_CTRACE(osh, skb, file, line);
1003 #endif /* BCMDBG_CTRACE */
1004 	}
1005 #endif
1006 
1007 	atomic_add(skb_cnt, &osh->cmn->pktalloced);
1008 }
1009 
1010 #endif /* BCM_GMAC3 */
1011 
1012 /* Convert a driver packet to native(OS) packet
1013  * In the process, the packettag is zeroed out before sending up.
1014  * The IP code depends on skb->cb being set up correctly with various
1015  * options; in our case, that means it should be 0.
1016  */
1017 struct sk_buff * BCMFASTPATH
1018 osl_pkt_tonative(osl_t *osh, void *pkt)
1019 {
1020 	struct sk_buff *nskb;
1021 #ifdef BCMDBG_CTRACE
1022 	struct sk_buff *nskb1, *nskb2;
1023 #endif
1024 
1025 	if (osh->pub.pkttag)
1026 		OSL_PKTTAG_CLEAR(pkt);
1027 
1028 	/* Decrement the packet counter */
1029 	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
1030 		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
1031 
1032 #ifdef BCMDBG_CTRACE
1033 		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
1034 			if (PKTISCHAINED(nskb1)) {
1035 				nskb2 = PKTCLINK(nskb1);
1036 			} else {
1037 				nskb2 = NULL;
1038 			}
1039 
1040 			DEL_CTRACE(osh, nskb1);
1041 		}
1042 #endif /* BCMDBG_CTRACE */
1043 	}
1044 	return (struct sk_buff *)pkt;
1045 }
1046 
1047 /* Convert a native(OS) packet to driver packet.
1048  * In the process, the native packet is consumed (no copying is done)
1049  * and the packettag is zeroed out.
1050  */
1051 void * BCMFASTPATH
1052 #ifdef BCMDBG_CTRACE
1053 osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
1054 #else
1055 osl_pkt_frmnative(osl_t *osh, void *pkt)
1056 #endif /* BCMDBG_CTRACE */
1057 {
1058 	struct sk_buff *cskb;
1059 	struct sk_buff *nskb;
1060 	unsigned long pktalloced = 0;
1061 
1062 	if (osh->pub.pkttag)
1063 		OSL_PKTTAG_CLEAR(pkt);
1064 
1065 	/* walk the PKTCLINK() list */
1066 	for (cskb = (struct sk_buff *)pkt;
1067 	     cskb != NULL;
1068 	     cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {
1069 
1070 		/* walk the pkt buffer list */
1071 		for (nskb = cskb; nskb; nskb = nskb->next) {
1072 
1073 			/* Increment the packet counter */
1074 			pktalloced++;
1075 
1076 			/* clean the 'prev' pointer
1077 			 * Kernel 3.18 is leaving skb->prev pointer set to skb
1078 			 * to indicate a non-fragmented skb
1079 			 */
1080 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
1081 			nskb->prev = NULL;
1082 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
1083 
1084 
1085 #ifdef BCMDBG_CTRACE
1086 			ADD_CTRACE(osh, nskb, file, line);
1087 #endif /* BCMDBG_CTRACE */
1088 		}
1089 	}
1090 
1091 	/* Increment the packet counter */
1092 	atomic_add(pktalloced, &osh->cmn->pktalloced);
1093 
1094 	return (void *)pkt;
1095 }
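/*
 * Round-trip sketch (illustrative, non-BCMDBG_CTRACE signatures):
 * packets crossing the OS boundary are converted in pairs so that
 * cmn->pktalloced stays balanced. skb stands in for a buffer received
 * from the network stack.
 */
#if 0
	void *pkt = osl_pkt_frmnative(osh, skb);	/* OS -> driver, count++ */
	/* ... driver processing ... */
	skb = osl_pkt_tonative(osh, pkt);		/* driver -> OS, count-- */
#endif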
1096 
1097 /* Return a new packet. zero out pkttag */
1098 void * BCMFASTPATH
1099 #ifdef BCMDBG_CTRACE
1100 osl_pktget(osl_t *osh, uint len, int line, char *file)
1101 #else
1102 #ifdef BCM_OBJECT_TRACE
1103 osl_pktget(osl_t *osh, uint len, int line, const char *caller)
1104 #else
1105 osl_pktget(osl_t *osh, uint len)
1106 #endif /* BCM_OBJECT_TRACE */
1107 #endif /* BCMDBG_CTRACE */
1108 {
1109 	struct sk_buff *skb;
1110 	uchar num = 0;
1111 	if (lmtest != FALSE) {
1112 		get_random_bytes(&num, sizeof(uchar));
1113 		if ((num + 1) <= (256 * lmtest / 100))
1114 			return NULL;
1115 	}
1116 
1117 #ifdef CTFPOOL
1118 	/* Allocate from local pool */
1119 	skb = osl_pktfastget(osh, len);
1120 	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL))
1121 #else /* CTFPOOL */
1122 	if ((skb = osl_alloc_skb(osh, len)))
1123 #endif /* CTFPOOL */
1124 	{
1125 		skb->tail += len;
1126 		skb->len  += len;
1127 		skb->priority = 0;
1128 
1129 #ifdef BCMDBG_CTRACE
1130 		ADD_CTRACE(osh, skb, file, line);
1131 #endif
1132 		atomic_inc(&osh->cmn->pktalloced);
1133 #ifdef BCM_OBJECT_TRACE
1134 		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
1135 #endif /* BCM_OBJECT_TRACE */
1136 	}
1137 
1138 	return ((void*) skb);
1139 }
1140 
1141 #ifdef CTFPOOL
1142 static inline void
1143 osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
1144 {
1145 	ctfpool_t *ctfpool;
1146 #ifdef CTFPOOL_SPINLOCK
1147 	unsigned long flags;
1148 #endif /* CTFPOOL_SPINLOCK */
1149 
1150 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
1151 	skb->tstamp.tv.sec = 0;
1152 #else
1153 	skb->stamp.tv_sec = 0;
1154 #endif
1155 
1156 	/* We only need to init the fields that we change */
1157 	skb->dev = NULL;
1158 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
1159 	skb->dst = NULL;
1160 #endif
1161 	OSL_PKTTAG_CLEAR(skb);
1162 	skb->ip_summed = 0;
1163 
1164 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1165 	skb_orphan(skb);
1166 #else
1167 	skb->destructor = NULL;
1168 #endif
1169 
1170 	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
1171 	ASSERT(ctfpool != NULL);
1172 
1173 	/* if osh is a fwder osh, reset the fwder buf */
1174 	osl_fwderbuf_reset(ctfpool->osh, skb);
1175 
1176 	/* Add object to the ctfpool */
1177 	CTFPOOL_LOCK(ctfpool, flags);
1178 	skb->next = (struct sk_buff *)ctfpool->head;
1179 	ctfpool->head = (void *)skb;
1180 
1181 	ctfpool->fast_frees++;
1182 	ctfpool->curr_obj++;
1183 
1184 	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
1185 	CTFPOOL_UNLOCK(ctfpool, flags);
1186 }
1187 #endif /* CTFPOOL */
1188 
1189 /* Free the driver packet. Free the tag if present */
1190 void BCMFASTPATH
1191 #ifdef BCM_OBJECT_TRACE
1192 osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
1193 #else
1194 osl_pktfree(osl_t *osh, void *p, bool send)
1195 #endif /* BCM_OBJECT_TRACE */
1196 {
1197 	struct sk_buff *skb, *nskb;
1198 	if (osh == NULL)
1199 		return;
1200 
1201 	skb = (struct sk_buff*) p;
1202 
1203 	if (send && osh->pub.tx_fn)
1204 		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
1205 
1206 	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
1207 
1208 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
1209 	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
1210 		printk("%s: pkt %p is from static pool\n",
1211 			__FUNCTION__, p);
1212 		dump_stack();
1213 		return;
1214 	}
1215 
1216 	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
1217 		printk("%s: pkt %p is from static pool and not in used\n",
1218 			__FUNCTION__, p);
1219 		dump_stack();
1220 		return;
1221 	}
1222 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
1223 
1224 	/* perversion: we use skb->next to chain multi-skb packets */
1225 	while (skb) {
1226 		nskb = skb->next;
1227 		skb->next = NULL;
1228 
1229 #ifdef BCMDBG_CTRACE
1230 		DEL_CTRACE(osh, skb);
1231 #endif
1232 
1233 
1234 #ifdef BCM_OBJECT_TRACE
1235 		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
1236 #endif /* BCM_OBJECT_TRACE */
1237 
1238 #ifdef CTFPOOL
1239 		if (PKTISFAST(osh, skb)) {
1240 			if (atomic_read(&skb->users) == 1)
1241 				smp_rmb();
1242 			else if (!atomic_dec_and_test(&skb->users))
1243 				goto next_skb;
1244 			osl_pktfastfree(osh, skb);
1245 		} else
1246 #endif
1247 		{
1248 			dev_kfree_skb_any(skb);
1249 		}
1250 #ifdef CTFPOOL
1251 next_skb:
1252 #endif
1253 		atomic_dec(&osh->cmn->pktalloced);
1254 		skb = nskb;
1255 	}
1256 }
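/*
 * Alloc/free pairing sketch (illustrative, non-BCM_OBJECT_TRACE
 * signatures): every osl_pktget() is balanced by osl_pktfree(), which
 * walks the skb->next chain and returns ctfpool buffers to the pool
 * rather than to the kernel.
 */
#if 0
	void *p = osl_pktget(osh, 1536 /* example buffer length */);
	if (p != NULL) {
		/* ... fill the packet, hand it to the bus, or ... */
		osl_pktfree(osh, p, FALSE /* send: not a tx completion */);
	}
#endif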
1257 
1258 #ifdef CONFIG_DHD_USE_STATIC_BUF
1259 void*
1260 osl_pktget_static(osl_t *osh, uint len)
1261 {
1262 	int i = 0;
1263 	struct sk_buff *skb;
1264 #ifdef DHD_USE_STATIC_CTRLBUF
1265 	unsigned long flags;
1266 #endif /* DHD_USE_STATIC_CTRLBUF */
1267 
1268 	if (!bcm_static_skb)
1269 		return osl_pktget(osh, len);
1270 
1271 	if (len > DHD_SKB_MAX_BUFSIZE) {
1272 		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
1273 		return osl_pktget(osh, len);
1274 	}
1275 
1276 #ifdef DHD_USE_STATIC_CTRLBUF
1277 	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
1278 
1279 	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
1280 		uint32 index;
1281 		for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
1282 			index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
1283 			bcm_static_skb->last_allocated_index++;
1284 			if (bcm_static_skb->skb_8k[index] &&
1285 				bcm_static_skb->pkt_use[index] == 0) {
1286 				break;
1287 			}
1288 		}
1289 
1290 		if ((i != STATIC_PKT_2PAGE_NUM) &&
1291 			(index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) {
1292 			bcm_static_skb->pkt_use[index] = 1;
1293 			skb = bcm_static_skb->skb_8k[index];
1294 			skb->data = skb->head;
1295 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1296 			skb_set_tail_pointer(skb, NET_SKB_PAD);
1297 #else
1298 			skb->tail = skb->data + NET_SKB_PAD;
1299 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1300 			skb->data += NET_SKB_PAD;
1301 			skb->cloned = 0;
1302 			skb->priority = 0;
1303 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1304 			skb_set_tail_pointer(skb, len);
1305 #else
1306 			skb->tail = skb->data + len;
1307 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1308 			skb->len = len;
1309 			skb->mac_len = PREALLOC_USED_MAGIC;
1310 			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
1311 			return skb;
1312 		}
1313 	}
1314 
1315 	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
1316 	printk("%s: all static pkt in use!\n", __FUNCTION__);
1317 	return NULL;
1318 #else
1319 	down(&bcm_static_skb->osl_pkt_sem);
1320 
1321 	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
1322 		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
1323 			if (bcm_static_skb->skb_4k[i] &&
1324 				bcm_static_skb->pkt_use[i] == 0) {
1325 				break;
1326 			}
1327 		}
1328 
1329 		if (i != STATIC_PKT_1PAGE_NUM) {
1330 			bcm_static_skb->pkt_use[i] = 1;
1331 
1332 			skb = bcm_static_skb->skb_4k[i];
1333 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1334 			skb_set_tail_pointer(skb, len);
1335 #else
1336 			skb->tail = skb->data + len;
1337 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1338 			skb->len = len;
1339 
1340 			up(&bcm_static_skb->osl_pkt_sem);
1341 			return skb;
1342 		}
1343 	}
1344 
1345 	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
1346 		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
1347 			if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
1348 				bcm_static_skb->pkt_use[i] == 0) {
1349 				break;
1350 			}
1351 		}
1352 
1353 		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
1354 			bcm_static_skb->pkt_use[i] = 1;
1355 			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
1356 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1357 			skb_set_tail_pointer(skb, len);
1358 #else
1359 			skb->tail = skb->data + len;
1360 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1361 			skb->len = len;
1362 
1363 			up(&bcm_static_skb->osl_pkt_sem);
1364 			return skb;
1365 		}
1366 	}
1367 
1368 #if defined(ENHANCED_STATIC_BUF)
1369 	if (bcm_static_skb->skb_16k &&
1370 		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
1371 		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
1372 
1373 		skb = bcm_static_skb->skb_16k;
1374 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1375 		skb_set_tail_pointer(skb, len);
1376 #else
1377 		skb->tail = skb->data + len;
1378 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1379 		skb->len = len;
1380 
1381 		up(&bcm_static_skb->osl_pkt_sem);
1382 		return skb;
1383 	}
1384 #endif /* ENHANCED_STATIC_BUF */
1385 
1386 	up(&bcm_static_skb->osl_pkt_sem);
1387 	printk("%s: all static pkt in use!\n", __FUNCTION__);
1388 	return osl_pktget(osh, len);
1389 #endif /* DHD_USE_STATIC_CTRLBUF */
1390 }
1391 
1392 void
1393 osl_pktfree_static(osl_t *osh, void *p, bool send)
1394 {
1395 	int i;
1396 #ifdef DHD_USE_STATIC_CTRLBUF
1397 	struct sk_buff *skb = (struct sk_buff *)p;
1398 	unsigned long flags;
1399 #endif /* DHD_USE_STATIC_CTRLBUF */
1400 
1401 	if (!p) {
1402 		return;
1403 	}
1404 
1405 	if (!bcm_static_skb) {
1406 		osl_pktfree(osh, p, send);
1407 		return;
1408 	}
1409 
1410 #ifdef DHD_USE_STATIC_CTRLBUF
1411 	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
1412 
1413 	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
1414 		if (p == bcm_static_skb->skb_8k[i]) {
1415 			if (bcm_static_skb->pkt_use[i] == 0) {
1416 				printk("%s: static pkt idx %d(%p) is double free\n",
1417 					__FUNCTION__, i, p);
1418 			} else {
1419 				bcm_static_skb->pkt_use[i] = 0;
1420 			}
1421 
1422 			if (skb->mac_len != PREALLOC_USED_MAGIC) {
1423 				printk("%s: static pkt idx %d(%p) is not in used\n",
1424 					__FUNCTION__, i, p);
1425 			}
1426 
1427 			skb->mac_len = PREALLOC_FREE_MAGIC;
1428 			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
1429 			return;
1430 		}
1431 	}
1432 
1433 	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
1434 	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
1435 #else
1436 	down(&bcm_static_skb->osl_pkt_sem);
1437 	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
1438 		if (p == bcm_static_skb->skb_4k[i]) {
1439 			bcm_static_skb->pkt_use[i] = 0;
1440 			up(&bcm_static_skb->osl_pkt_sem);
1441 			return;
1442 		}
1443 	}
1444 
1445 	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
1446 		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
1447 			bcm_static_skb->pkt_use[i] = 0;
1448 			up(&bcm_static_skb->osl_pkt_sem);
1449 			return;
1450 		}
1451 	}
1452 #ifdef ENHANCED_STATIC_BUF
1453 	if (p == bcm_static_skb->skb_16k) {
1454 		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
1455 		up(&bcm_static_skb->osl_pkt_sem);
1456 		return;
1457 	}
1458 #endif
1459 	up(&bcm_static_skb->osl_pkt_sem);
1460 #endif /* DHD_USE_STATIC_CTRLBUF */
1461 	osl_pktfree(osh, p, send);
1462 }
1463 #endif /* CONFIG_DHD_USE_STATIC_BUF */
1464 
1465 uint32
1466 osl_pci_read_config(osl_t *osh, uint offset, uint size)
1467 {
1468 	uint val = 0;
1469 	uint retry = PCI_CFG_RETRY;
1470 
1471 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1472 
1473 	/* only 4-byte access supported */
1474 	ASSERT(size == 4);
1475 
1476 	do {
1477 		pci_read_config_dword(osh->pdev, offset, &val);
1478 		if (val != 0xffffffff)
1479 			break;
1480 	} while (retry--);
1481 
1482 
1483 	return (val);
1484 }
1485 
1486 void
1487 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
1488 {
1489 	uint retry = PCI_CFG_RETRY;
1490 
1491 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1492 
1493 	/* only 4-byte access supported */
1494 	ASSERT(size == 4);
1495 
1496 	do {
1497 		pci_write_config_dword(osh->pdev, offset, val);
1498 		if (offset != PCI_BAR0_WIN)
1499 			break;
1500 		if (osl_pci_read_config(osh, offset, size) == val)
1501 			break;
1502 	} while (retry--);
1503 
1504 }
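/*
 * Usage sketch (illustrative): only dword accesses are supported (the
 * ASSERTs above require size == 4), and writes to PCI_BAR0_WIN are read
 * back and retried up to PCI_CFG_RETRY times, since a missed window
 * move misdirects all subsequent register accesses. The window base
 * below is an example value.
 */
#if 0
	uint32 win = osl_pci_read_config(osh, PCI_BAR0_WIN, 4);
	osl_pci_write_config(osh, PCI_BAR0_WIN, 4, 0x18001000 /* example */);
#endif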
1505 
1506 /* return bus # for the pci device pointed by osh->pdev */
1507 uint
1508 osl_pci_bus(osl_t *osh)
1509 {
1510 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
1511 
1512 #if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
1513 	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
1514 #else
1515 	return ((struct pci_dev *)osh->pdev)->bus->number;
1516 #endif
1517 }
1518 
1519 /* return slot # for the pci device pointed by osh->pdev */
1520 uint
1521 osl_pci_slot(osl_t *osh)
1522 {
1523 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
1524 
1525 #if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
1526 	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
1527 #else
1528 	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
1529 #endif
1530 }
1531 
1532 /* return domain # for the pci device pointed by osh->pdev */
1533 uint
1534 osl_pcie_domain(osl_t *osh)
1535 {
1536 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
1537 
1538 	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
1539 }
1540 
1541 /* return bus # for the pci device pointed by osh->pdev */
1542 uint
1543 osl_pcie_bus(osl_t *osh)
1544 {
1545 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
1546 
1547 	return ((struct pci_dev *)osh->pdev)->bus->number;
1548 }
1549 
1550 /* return the pci device pointed by osh->pdev */
1551 struct pci_dev *
1552 osl_pci_device(osl_t *osh)
1553 {
1554 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
1555 
1556 	return osh->pdev;
1557 }
1558 
1559 static void
1560 osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
1561 {
1562 }
1563 
1564 void
1565 osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
1566 {
1567 	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
1568 }
1569 
1570 void
1571 osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
1572 {
1573 	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
1574 }
1575 
1576 void *
1577 osl_malloc(osl_t *osh, uint size)
1578 {
1579 	void *addr;
1580 	gfp_t flags;
1581 
1582 	/* only ASSERT if osh is defined */
1583 	if (osh)
1584 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
1585 #ifdef CONFIG_DHD_USE_STATIC_BUF
1586 	if (bcm_static_buf)
1587 	{
1588 		unsigned long irq_flags;
1589 		int i = 0;
1590 		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
1591 		{
1592 			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);
1593 
1594 			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
1595 			{
1596 				if (bcm_static_buf->buf_use[i] == 0)
1597 					break;
1598 			}
1599 
1600 			if (i == STATIC_BUF_MAX_NUM)
1601 			{
1602 				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
1603 				printk("all static buff in use!\n");
1604 				goto original;
1605 			}
1606 
1607 			bcm_static_buf->buf_use[i] = 1;
1608 			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
1609 
1610 			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
1611 			if (osh)
1612 				atomic_add(size, &osh->cmn->malloced);
1613 
1614 			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
1615 		}
1616 	}
1617 original:
1618 #endif /* CONFIG_DHD_USE_STATIC_BUF */
1619 
1620 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
1621 	if ((addr = kmalloc(size, flags)) == NULL) {
1622 		if (osh)
1623 			osh->failed++;
1624 		return (NULL);
1625 	}
1626 	if (osh && osh->cmn)
1627 		atomic_add(size, &osh->cmn->malloced);
1628 
1629 	return (addr);
1630 }
1631 
1632 void *
1633 osl_mallocz(osl_t *osh, uint size)
1634 {
1635 	void *ptr;
1636 
1637 	ptr = osl_malloc(osh, size);
1638 
1639 	if (ptr != NULL) {
1640 		bzero(ptr, size);
1641 	}
1642 
1643 	return ptr;
1644 }
1645 
1646 void
1647 osl_mfree(osl_t *osh, void *addr, uint size)
1648 {
1649 #ifdef CONFIG_DHD_USE_STATIC_BUF
1650 	unsigned long flags;
1651 
1652 	if (bcm_static_buf)
1653 	{
1654 		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
1655 			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
1656 		{
1657 			int buf_idx = 0;
1658 
1659 			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
1660 
1661 			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
1662 			bcm_static_buf->buf_use[buf_idx] = 0;
1663 			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);
1664 
1665 			if (osh && osh->cmn) {
1666 				ASSERT(osh->magic == OS_HANDLE_MAGIC);
1667 				atomic_sub(size, &osh->cmn->malloced);
1668 			}
1669 			return;
1670 		}
1671 	}
1672 #endif /* CONFIG_DHD_USE_STATIC_BUF */
1673 	if (osh && osh->cmn) {
1674 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
1675 
1676 		ASSERT(size <= osl_malloced(osh));
1677 
1678 		atomic_sub(size, &osh->cmn->malloced);
1679 	}
1680 	kfree(addr);
1681 }
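/*
 * Accounting sketch (illustrative): MFREE must pass the same size as
 * the matching MALLOC so cmn->malloced balances; osl_check_memleak()
 * reports any residue once the last OSH reference is dropped. struct
 * foo is hypothetical.
 */
#if 0
	struct foo *ctx = osl_malloc(osh, sizeof(*ctx));
	if (ctx != NULL)
		osl_mfree(osh, ctx, sizeof(*ctx));	/* same size as the alloc */
#endif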
1682 
1683 void *
1684 osl_vmalloc(osl_t *osh, uint size)
1685 {
1686 	void *addr;
1687 
1688 	/* only ASSERT if osh is defined */
1689 	if (osh)
1690 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
1691 	if ((addr = vmalloc(size)) == NULL) {
1692 		if (osh)
1693 			osh->failed++;
1694 		return (NULL);
1695 	}
1696 	if (osh && osh->cmn)
1697 		atomic_add(size, &osh->cmn->malloced);
1698 
1699 	return (addr);
1700 }
1701 
1702 void *
1703 osl_vmallocz(osl_t *osh, uint size)
1704 {
1705 	void *ptr;
1706 
1707 	ptr = osl_vmalloc(osh, size);
1708 
1709 	if (ptr != NULL) {
1710 		bzero(ptr, size);
1711 	}
1712 
1713 	return ptr;
1714 }
1715 
1716 void
1717 osl_vmfree(osl_t *osh, void *addr, uint size)
1718 {
1719 	if (osh && osh->cmn) {
1720 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
1721 
1722 		ASSERT(size <= osl_malloced(osh));
1723 
1724 		atomic_sub(size, &osh->cmn->malloced);
1725 	}
1726 	vfree(addr);
1727 }
1728 
1729 uint
1730 osl_check_memleak(osl_t *osh)
1731 {
1732 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1733 	if (atomic_read(&osh->cmn->refcount) == 1)
1734 		return (atomic_read(&osh->cmn->malloced));
1735 	else
1736 		return 0;
1737 }
1738 
1739 uint
1740 osl_malloced(osl_t *osh)
1741 {
1742 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1743 	return (atomic_read(&osh->cmn->malloced));
1744 }
1745 
1746 uint
1747 osl_malloc_failed(osl_t *osh)
1748 {
1749 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1750 	return (osh->failed);
1751 }
1752 
1753 
1754 uint
1755 osl_dma_consistent_align(void)
1756 {
1757 	return (PAGE_SIZE);
1758 }
1759 
1760 void*
1761 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
1762 {
1763 	void *va;
1764 	uint16 align = (1 << align_bits);
1765 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1766 
1767 	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
1768 		size += align;
1769 	*alloced = size;
1770 
1771 #ifndef	BCM_SECURE_DMA
1772 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
1773 	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
1774 	if (va)
1775 		*pap = (ulong)__virt_to_phys((ulong)va);
1776 #else
1777 	{
1778 		dma_addr_t pap_lin;
1779 		struct pci_dev *hwdev = osh->pdev;
1780 		gfp_t flags;
1781 #ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
1782 		flags = GFP_ATOMIC;
1783 #else
1784 		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
1785 #endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
1786 		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
1787 #ifdef BCMDMA64OSL
1788 		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
1789 		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
1790 #else
1791 		*pap = (dmaaddr_t)pap_lin;
1792 #endif /* BCMDMA64OSL */
1793 	}
1794 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
1795 #else
1796 	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
1797 #endif /* BCM_SECURE_DMA */
1798 	return va;
1799 }
1800 
1801 void
1802 osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
1803 {
1804 #ifdef BCMDMA64OSL
1805 	dma_addr_t paddr;
1806 #endif /* BCMDMA64OSL */
1807 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1808 
1809 #ifndef BCM_SECURE_DMA
1810 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
1811 	kfree(va);
1812 #else
1813 #ifdef BCMDMA64OSL
1814 	PHYSADDRTOULONG(pa, paddr);
1815 	pci_free_consistent(osh->pdev, size, va, paddr);
1816 #else
1817 	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
1818 #endif /* BCMDMA64OSL */
1819 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
1820 #else
1821 	osl_sec_dma_free_consistent(osh, va, size, pa);
1822 #endif /* BCM_SECURE_DMA */
1823 }
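/*
 * Usage sketch (illustrative): *alloced returns the (possibly padded)
 * size, which must be handed back on free; pa is split into hi/lo words
 * under BCMDMA64OSL, so callers treat it as opaque. align_bits = 12
 * requests 4 KB alignment.
 */
#if 0
	uint alloced = 0;
	dmaaddr_t pa;
	void *ring = osl_dma_alloc_consistent(osh, 8192, 12, &alloced, &pa);
	if (ring != NULL)
		osl_dma_free_consistent(osh, ring, alloced, pa);
#endif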
1824 
1825 dmaaddr_t BCMFASTPATH
1826 osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
1827 {
1828 	int dir;
1829 	dmaaddr_t ret_addr;
1830 	dma_addr_t map_addr;
1831 	int ret;
1832 
1833 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1834 	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1835 
1836 
1837 
1838 
1839 	map_addr = pci_map_single(osh->pdev, va, size, dir);
1840 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
1841 	ret = pci_dma_mapping_error(osh->pdev, map_addr);
1842 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
1843 	ret = pci_dma_mapping_error(map_addr);
1844 #else
1845 	ret = 0;
1846 #endif
1847 	if (ret) {
1848 		printk("%s: Failed to map memory\n", __FUNCTION__);
1849 		PHYSADDRLOSET(ret_addr, 0);
1850 		PHYSADDRHISET(ret_addr, 0);
1851 	} else {
1852 		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
1853 		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
1854 	}
1855 
1856 	return ret_addr;
1857 }
1858 
1859 void BCMFASTPATH
1860 osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
1861 {
1862 	int dir;
1863 #ifdef BCMDMA64OSL
1864 	dma_addr_t paddr;
1865 #endif /* BCMDMA64OSL */
1866 
1867 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1868 
1869 
1870 	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1871 #ifdef BCMDMA64OSL
1872 	PHYSADDRTOULONG(pa, paddr);
1873 	pci_unmap_single(osh->pdev, paddr, size, dir);
1874 #else
1875 	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
1876 #endif /* BCMDMA64OSL */
1877 }
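
/*
 * Illustrative sketch, not part of the original driver: a streaming-DMA
 * round trip for a transmit buffer using the helpers above. On mapping
 * failure osl_dma_map() returns an address whose PHYSADDRLO/PHYSADDRHI
 * parts are both zero (see the error branch above).
 */
static void
example_dma_map_roundtrip(osl_t *osh, void *buf, uint len)
{
	dmaaddr_t pa = osl_dma_map(osh, buf, len, DMA_TX, NULL, NULL);

	/* ... hand 'pa' to the device and wait for the DMA to complete ... */
	osl_dma_unmap(osh, pa, len, DMA_TX);
}
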
1878 
1879 /* OSL function for CPU relax */
1880 inline void BCMFASTPATH
1881 osl_cpu_relax(void)
1882 {
1883 	cpu_relax();
1884 }
1885 
1886 extern void osl_preempt_disable(osl_t *osh)
1887 {
1888 	preempt_disable();
1889 }
1890 
1891 extern void osl_preempt_enable(osl_t *osh)
1892 {
1893 	preempt_enable();
1894 }
1895 
1896 #if defined(BCMASSERT_LOG)
1897 void
1898 osl_assert(const char *exp, const char *file, int line)
1899 {
1900 	char tempbuf[256];
1901 	const char *basename;
1902 
1903 	basename = strrchr(file, '/');
1904 	/* skip the '/' */
1905 	if (basename)
1906 		basename++;
1907 
1908 	if (!basename)
1909 		basename = file;
1910 
1911 #ifdef BCMASSERT_LOG
1912 	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
1913 		exp, basename, line);
1914 #endif /* BCMASSERT_LOG */
1915 
1916 
1917 	switch (g_assert_type) {
1918 	case 0:
1919 		panic("%s", tempbuf);
1920 		break;
1921 	case 1:
1922 		/* fall through */
1923 	case 3:
1924 		printk("%s", tempbuf);
1925 		break;
1926 	case 2:
1927 		printk("%s", tempbuf);
1928 		BUG();
1929 		break;
1930 	default:
1931 		break;
1932 	}
1933 }
1934 #endif /* BCMASSERT_LOG */
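
/*
 * Note: what osl_assert() does on a failure is selected by g_assert_type:
 * 0 panics the kernel, 1 and 3 only log the failing expression, and 2
 * logs it and then triggers BUG().
 */
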
1935 
1936 void
1937 osl_delay(uint usec)
1938 {
1939 	uint d;
1940 
1941 	while (usec > 0) {
1942 		d = MIN(usec, 1000);
1943 		udelay(d);
1944 		usec -= d;
1945 	}
1946 }
1947 
1948 void
1949 osl_sleep(uint ms)
1950 {
1951 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1952 	if (ms < 20)
1953 		usleep_range(ms*1000, ms*1000 + 1000);
1954 	else
1955 #endif
1956 	msleep(ms);
1957 }
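
/*
 * Note: osl_delay() busy-waits (udelay() in chunks of at most 1000 us)
 * and is safe in atomic context, whereas osl_sleep() may sleep
 * (usleep_range()/msleep()) and must only be called from process context.
 */
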
1958 
1959 uint64
1960 osl_sysuptime_us(void)
1961 {
1962 	struct osl_timespec tv;
1963 	uint64 usec;
1964 
1965 	osl_do_gettimeofday(&tv);
1966 	/* tv_usec content is fraction of a second */
1967 	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
1968 	return usec;
1969 }
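
/*
 * Illustrative sketch, not part of the original driver: timing a code
 * path with osl_sysuptime_us().
 */
static void
example_time_codepath(void)
{
	uint64 start = osl_sysuptime_us();

	/* ... work being measured ... */
	printk("elapsed: %llu us\n",
		(unsigned long long)(osl_sysuptime_us() - start));
}
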
1970 
1971 
1972 /* Clone a packet.
1973  * The pkttag contents are NOT cloned.
1974  */
1975 void *
1976 #ifdef BCMDBG_CTRACE
1977 osl_pktdup(osl_t *osh, void *skb, int line, char *file)
1978 #else
1979 #ifdef BCM_OBJECT_TRACE
1980 osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
1981 #else
1982 osl_pktdup(osl_t *osh, void *skb)
1983 #endif /* BCM_OBJECT_TRACE */
1984 #endif /* BCMDBG_CTRACE */
1985 {
1986 	void * p;
1987 
1988 	ASSERT(!PKTISCHAINED(skb));
1989 
1990 	/* clear the CTFBUF flag if set and map the rest of the buffer
1991 	 * before cloning.
1992 	 */
1993 	PKTCTFMAP(osh, skb);
1994 
1995 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1996 	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
1997 #else
1998 	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
1999 #endif
2000 		return NULL;
2001 
2002 #ifdef CTFPOOL
2003 	if (PKTISFAST(osh, skb)) {
2004 		ctfpool_t *ctfpool;
2005 
2006 		/* If a buffer allocated from the ctfpool is cloned, we
2007 		 * can't be sure when it will be freed. Since there is a
2008 		 * chance of losing a buffer from our pool, increment the
2009 		 * refill count so a replacement object is allocated
2010 		 * later.
2011 		 */
2012 		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
2013 		ASSERT(ctfpool != NULL);
2014 		PKTCLRFAST(osh, p);
2015 		PKTCLRFAST(osh, skb);
2016 		ctfpool->refills++;
2017 	}
2018 #endif /* CTFPOOL */
2019 
2020 	/* Clear PKTC  context */
2021 	PKTSETCLINK(p, NULL);
2022 	PKTCCLRFLAGS(p);
2023 	PKTCSETCNT(p, 1);
2024 	PKTCSETLEN(p, PKTLEN(osh, skb));
2025 
2026 	/* skb_clone copies skb->cb.. we don't want that */
2027 	if (osh->pub.pkttag)
2028 		OSL_PKTTAG_CLEAR(p);
2029 
2030 	/* Increment the packet counter */
2031 	atomic_inc(&osh->cmn->pktalloced);
2032 #ifdef BCM_OBJECT_TRACE
2033 	bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
2034 #endif /* BCM_OBJECT_TRACE */
2035 
2036 #ifdef BCMDBG_CTRACE
2037 	ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
2038 #endif
2039 	return (p);
2040 }
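
/*
 * Illustrative sketch, not part of the original driver: duplicating and
 * releasing a packet in the default build (neither BCMDBG_CTRACE nor
 * BCM_OBJECT_TRACE defined, so osl_pktdup() takes only osh and skb).
 * Assumes the usual PKTFREE() macro from osl.h.
 */
static void
example_pktdup(osl_t *osh, void *pkt)
{
	void *copy = osl_pktdup(osh, pkt);

	if (copy != NULL)
		PKTFREE(osh, copy, FALSE);	/* FALSE: not a tx-completion free */
}
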
2041 
2042 #ifdef BCMDBG_CTRACE
2043 int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
2044 {
2045 	unsigned long flags;
2046 	struct sk_buff *skb;
2047 	int ck = FALSE;
2048 
2049 	spin_lock_irqsave(&osh->ctrace_lock, flags);
2050 
2051 	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
2052 		if (pkt == skb) {
2053 			ck = TRUE;
2054 			break;
2055 		}
2056 	}
2057 
2058 	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
2059 	return ck;
2060 }
2061 
2062 void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
2063 {
2064 	unsigned long flags;
2065 	struct sk_buff *skb;
2066 	int idx = 0;
2067 	int i, j;
2068 
2069 	spin_lock_irqsave(&osh->ctrace_lock, flags);
2070 
2071 	if (b != NULL)
2072 		bcm_bprintf(b, " Total %d skb not free\n", osh->ctrace_num);
2073 	else
2074 		printk(" Total %d skb not free\n", osh->ctrace_num);
2075 
2076 	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
2077 		if (b != NULL)
2078 			bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
2079 		else
2080 			printk("[%d] skb %p:\n", ++idx, skb);
2081 
2082 		for (i = 0; i < skb->ctrace_count; i++) {
2083 			j = (skb->ctrace_start + i) % CTRACE_NUM;
2084 			if (b != NULL)
2085 				bcm_bprintf(b, "    [%s(%d)]\n", skb->func[j], skb->line[j]);
2086 			else
2087 				printk("    [%s(%d)]\n", skb->func[j], skb->line[j]);
2088 		}
2089 		if (b != NULL)
2090 			bcm_bprintf(b, "\n");
2091 		else
2092 			printk("\n");
2093 	}
2094 
2095 	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
2096 
2097 	return;
2098 }
2099 #endif /* BCMDBG_CTRACE */
2100 
2101 
2102 /*
2103  * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
2104  */
2105 
2106 /*
2107  * BINOSL selects the slightly slower function-call-based binary compatible osl.
2108  */
2109 
2110 uint
2111 osl_pktalloced(osl_t *osh)
2112 {
2113 	if (atomic_read(&osh->cmn->refcount) == 1)
2114 		return (atomic_read(&osh->cmn->pktalloced));
2115 	else
2116 		return 0;
2117 }
2118 
2119 uint32
2120 osl_rand(void)
2121 {
2122 	uint32 rand;
2123 
2124 	get_random_bytes(&rand, sizeof(rand));
2125 
2126 	return rand;
2127 }
2128 
2129 /* Linux Kernel: File Operations: start */
2130 void *
2131 osl_os_open_image(char *filename)
2132 {
2133 	struct file *fp;
2134 
2135 	fp = filp_open(filename, O_RDONLY, 0);
2136 	/*
2137 	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
2138 	 * Alternative:
2139 	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
2140 	 * ???
2141 	 */
2142 	if (IS_ERR(fp))
2143 		fp = NULL;
2144 
2145 	return fp;
2146 }
2147 
2148 int
2149 osl_os_get_image_block(char *buf, int len, void *image)
2150 {
2151 	struct file *fp = (struct file *)image;
2152 	int rdlen;
2153 
2154 	if (!image)
2155 		return 0;
2156 
2157 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
2158 	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
2159 #else
2160 	rdlen = kernel_read(fp, fp->f_pos, buf, len);
2161 	if (rdlen > 0)
2162 		fp->f_pos += rdlen;
2163 #endif
2164 
2165 	return rdlen;
2166 }
2167 
2168 void
2169 osl_os_close_image(void *image)
2170 {
2171 	if (image)
2172 		filp_close((struct file *)image, NULL);
2173 }
2174 
2175 int
2176 osl_os_image_size(void *image)
2177 {
2178 	int len = 0, curroffset;
2179 
2180 	if (image) {
2181 		/* store the current offset */
2182 		curroffset = generic_file_llseek(image, 0, SEEK_CUR);
2183 		/* goto end of file to get length */
2184 		len = generic_file_llseek(image, 0, SEEK_END);
2185 		/* restore back the offset */
2186 		generic_file_llseek(image, curroffset, SEEK_SET);
2187 	}
2188 	return len;
2189 }
2190 
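/*
 * Illustrative sketch, not part of the original driver: reading an image
 * file in chunks with the helpers above. osl_os_get_image_block() returns
 * the number of bytes read, 0 at end-of-file (or for a NULL image), and a
 * negative errno on error.
 */
static int
example_read_image(char *path, char *buf, int buflen)
{
	int total = 0, rdlen;
	void *image = osl_os_open_image(path);

	if (image == NULL)
		return -1;

	while ((rdlen = osl_os_get_image_block(buf, buflen, image)) > 0) {
		/* ... consume 'rdlen' bytes from 'buf' ... */
		total += rdlen;
	}

	osl_os_close_image(image);
	return (rdlen < 0) ? rdlen : total;
}
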
2191 /* Linux Kernel: File Operations: end */
2192 
2193 #if (defined(STB) && defined(__arm__))
2194 inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
2195 {
2196 	unsigned long flags = 0;
2197 	int pci_access = 0;
2198 #if defined(BCM_GMAC3)
2199 	const int acp_war_enab = 1;
2200 #else  /* !BCM_GMAC3 */
2201 	int acp_war_enab = ACP_WAR_ENAB();
2202 #endif /* !BCM_GMAC3 */
2203 
2204 	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
2205 		pci_access = 1;
2206 
2207 	if (pci_access && acp_war_enab)
2208 		spin_lock_irqsave(&l2x0_reg_lock, flags);
2209 
2210 	switch (size) {
2211 	case sizeof(uint8):
2212 		*(uint8*)v = readb((volatile uint8*)(addr));
2213 		break;
2214 	case sizeof(uint16):
2215 		*(uint16*)v = readw((volatile uint16*)(addr));
2216 		break;
2217 	case sizeof(uint32):
2218 		*(uint32*)v = readl((volatile uint32*)(addr));
2219 		break;
2220 	case sizeof(uint64):
2221 		*(uint64*)v = *((volatile uint64*)(addr));
2222 		break;
2223 	}
2224 
2225 	if (pci_access && acp_war_enab)
2226 		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
2227 }
2228 #endif
2229 
2230 #ifdef BCM_SECURE_DMA
2231 static void *
2232 osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
2233 {
2234 
2235 	struct page **map;
2236 	int order, i;
2237 	void *addr = NULL;
2238 
2239 	size = PAGE_ALIGN(size);
2240 	order = get_order(size);
2241 
2242 	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
2243 
2244 	if (map == NULL)
2245 		return NULL;
2246 
2247 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
2248 		map[i] = page + i;
2249 
2250 	if (iscache) {
2251 		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
2252 		if (isdecr) {
2253 			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
2254 		}
2255 	} else {
2256 
2257 #if defined(__ARM_ARCH_7A__)
2258 		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
2259 			pgprot_noncached(__pgprot(PAGE_KERNEL)));
2260 #endif
2261 		if (isdecr) {
2262 			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
2263 		}
2264 	}
2265 
2266 	kfree(map);
2267 	return (void *)addr;
2268 }
2269 
2270 static void
2271 osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
2272 {
2273 	vunmap(contig_base_va);
2274 }
2275 
2276 static int
2277 osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
2278 {
2279 	int i;
2280 	int ret = BCME_OK;
2281 	sec_mem_elem_t *sec_mem_elem;
2282 
2283 	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
2284 
2285 		*list = sec_mem_elem;
2286 		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
2287 		for (i = 0; i < max-1; i++) {
2288 			sec_mem_elem->next = (sec_mem_elem + 1);
2289 			sec_mem_elem->size = mbsize;
2290 			sec_mem_elem->pa_cma = osh->contig_base_alloc;
2291 			sec_mem_elem->vac = osh->contig_base_alloc_va;
2292 
2293 			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
2294 			osh->contig_base_alloc += mbsize;
2295 			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
2296 
2297 			sec_mem_elem = sec_mem_elem + 1;
2298 		}
2299 		sec_mem_elem->next = NULL;
2300 		sec_mem_elem->size = mbsize;
2301 		sec_mem_elem->pa_cma = osh->contig_base_alloc;
2302 		sec_mem_elem->vac = osh->contig_base_alloc_va;
2303 
2304 		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
2305 		osh->contig_base_alloc += mbsize;
2306 		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
2307 
2308 	} else {
2309 		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
2310 		ret = BCME_ERROR;
2311 	}
2312 	return ret;
2313 }
2314 
2315 
2316 static void
2317 osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
2318 {
2319 	if (sec_list_base)
2320 		kfree(sec_list_base);
2321 }
2322 
2323 static sec_mem_elem_t * BCMFASTPATH
2324 osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
2325 	struct sec_cma_info *ptr_cma_info, uint offset)
2326 {
2327 	sec_mem_elem_t *sec_mem_elem = NULL;
2328 
2329 	ASSERT(osh->sec_list_4096);
2330 	sec_mem_elem = osh->sec_list_4096;
2331 	osh->sec_list_4096 = sec_mem_elem->next;
2332 
2333 	sec_mem_elem->next = NULL;
2334 
2335 	if (ptr_cma_info->sec_alloc_list_tail) {
2336 		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
2337 		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
2338 	}
2339 	else {
2340 		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
2341 		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
2342 		ptr_cma_info->sec_alloc_list = sec_mem_elem;
2343 		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
2344 	}
2345 	return sec_mem_elem;
2346 }
2347 
2348 static void BCMFASTPATH
2349 osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
2350 {
2351 	sec_mem_elem->dma_handle = 0x0;
2352 	sec_mem_elem->va = NULL;
2353 	sec_mem_elem->next = osh->sec_list_4096;
2354 	osh->sec_list_4096 = sec_mem_elem;
2355 }
2356 
2357 static sec_mem_elem_t * BCMFASTPATH
2358 osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
2359 {
2360 	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
2361 	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
2362 
2363 	if (sec_mem_elem->dma_handle == dma_handle) {
2364 
2365 		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
2366 
2367 		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
2368 			ptr_cma_info->sec_alloc_list_tail = NULL;
2369 			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
2370 		}
2371 
2372 		return sec_mem_elem;
2373 	}
2374 	sec_mem_elem = sec_mem_elem->next;
2375 
2376 	while (sec_mem_elem != NULL) {
2377 
2378 		if (sec_mem_elem->dma_handle == dma_handle) {
2379 
2380 			sec_prv_elem->next = sec_mem_elem->next;
2381 			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
2382 				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
2383 
2384 			return sec_mem_elem;
2385 		}
2386 		sec_prv_elem = sec_mem_elem;
2387 		sec_mem_elem = sec_mem_elem->next;
2388 	}
2389 	return NULL;
2390 }
2391 
2392 static sec_mem_elem_t *
2393 osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
2394 {
2395 	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
2396 
2397 	if (sec_mem_elem) {
2398 
2399 		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
2400 
2401 		if (ptr_cma_info->sec_alloc_list == NULL)
2402 			ptr_cma_info->sec_alloc_list_tail = NULL;
2403 
2404 		return sec_mem_elem;
2405 
2406 	} else
2407 		return NULL;
2408 }
2409 
2410 static void * BCMFASTPATH
2411 osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
2412 {
2413 	return ptr_cma_info->sec_alloc_list_tail;
2414 }
2415 
2416 dma_addr_t BCMFASTPATH
2417 osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
2418 	hnddma_seg_map_t *dmah, void *ptr_cma_info)
2419 {
2420 	sec_mem_elem_t *sec_mem_elem;
2421 	struct page *pa_cma_page;
2422 	uint loffset;
2423 	void *vaorig = ((uint8 *)va + size);
2424 	dma_addr_t dma_handle = 0x0;
2425 	/* packet will be the one added with osl_sec_dma_map() just before this call */
2426 
2427 	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);
2428 
2429 	if (sec_mem_elem && sec_mem_elem->va == vaorig) {
2430 
2431 		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
2432 		loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
2433 
2434 		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
2435 			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
2436 
2437 	} else {
2438 		printf("%s: error orig va not found va = 0x%p \n",
2439 			__FUNCTION__, vaorig);
2440 	}
2441 	return dma_handle;
2442 }
2443 
2444 dma_addr_t BCMFASTPATH
2445 osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
2446 	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
2447 {
2448 
2449 	sec_mem_elem_t *sec_mem_elem;
2450 	struct page *pa_cma_page;
2451 	void *pa_cma_kmap_va = NULL;
2452 	uint buflen = 0;
2453 	dma_addr_t dma_handle = 0x0;
2454 	uint loffset;
2455 
2456 	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
2457 	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
2458 
2459 	sec_mem_elem->va = va;
2460 	sec_mem_elem->direction = direction;
2461 	pa_cma_page = sec_mem_elem->pa_cma_page;
2462 
2463 	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
2464 	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
2465 	* pa_cma_kmap_va += loffset;
2466 	*/
2467 
2468 	pa_cma_kmap_va = sec_mem_elem->vac;
2469 	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
2470 	buflen = size;
2471 
2472 	if (direction == DMA_TX) {
2473 		memcpy(pa_cma_kmap_va, va, size);	/* 'offset' already applied to pa_cma_kmap_va above */
2474 
2475 		if (dmah) {
2476 			dmah->nsegs = 1;
2477 			dmah->origsize = buflen;
2478 		}
2479 	}
2480 	else
2481 	{
2482 		if ((p != NULL) && (dmah != NULL)) {
2483 			dmah->nsegs = 1;
2484 			dmah->origsize = buflen;
2485 		}
2486 		*(uint32 *)(pa_cma_kmap_va) = 0x0;
2487 	}
2488 
2489 	if (direction == DMA_RX) {
2490 		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
2491 	}
2492 	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
2493 		(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
2494 	if (dmah) {
2495 		dmah->segs[0].addr = dma_handle;
2496 		dmah->segs[0].length = buflen;
2497 	}
2498 	sec_mem_elem->dma_handle = dma_handle;
2499 	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
2500 	return dma_handle;
2501 }
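
/*
 * Note on the flow above: for DMA_TX the payload is copied from the
 * caller's buffer into the secure CMA region (sec_mem_elem->vac) before
 * the CMA page is mapped towards the device; for DMA_RX the first word
 * of the CMA buffer is zeroed so that osl_sec_dma_unmap() can poll it to
 * detect completion before copying the payload back out.
 */
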
2502 
2503 dma_addr_t BCMFASTPATH
2504 osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
2505 {
2506 
2507 	struct page *pa_cma_page;
2508 	phys_addr_t pa_cma;
2509 	dma_addr_t dma_handle = 0x0;
2510 	uint loffset;
2511 
2512 	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
2513 	pa_cma_page = phys_to_page(pa_cma);
2514 	loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
2515 
2516 	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
2517 		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
2518 
2519 	return dma_handle;
2520 }
2521 
2522 void BCMFASTPATH
2523 osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
2524 void *p, hnddma_seg_map_t *map,	void *ptr_cma_info, uint offset)
2525 {
2526 	sec_mem_elem_t *sec_mem_elem;
2527 	void *pa_cma_kmap_va = NULL;
2528 	uint buflen = 0;
2529 	dma_addr_t pa_cma;
2530 	void *va;
2531 	int read_count = 0;
2532 	BCM_REFERENCE(buflen);
2533 	BCM_REFERENCE(read_count);
2534 
2535 	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
2536 	ASSERT(sec_mem_elem);
2537 
2538 	va = sec_mem_elem->va;
2539 	va = (uint8 *)va - offset;
2540 	pa_cma = sec_mem_elem->pa_cma;
2541 
2542 
2543 	if (direction == DMA_RX) {
2544 
2545 		if (p == NULL) {
2546 
2547 			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
2548 			* pa_cma_kmap_va += loffset;
2549 			*/
2550 
2551 			pa_cma_kmap_va = sec_mem_elem->vac;
2552 
2553 			do {
2554 				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
2555 
2556 				buflen = *(uint *)(pa_cma_kmap_va);
2557 				if (buflen)
2558 					break;
2559 
2560 				OSL_DELAY(1);
2561 				read_count++;
2562 			} while (read_count < 200);
2563 			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
2564 			memcpy(va, pa_cma_kmap_va, size);
2565 			/* kunmap_atomic(pa_cma_kmap_va); */
2566 		}
2567 	} else {
2568 		dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
2569 	}
2570 
2571 	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
2572 }
2573 
2574 void
2575 osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
2576 {
2577 
2578 	sec_mem_elem_t *sec_mem_elem;
2579 
2580 	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
2581 
2582 	while (sec_mem_elem != NULL) {
2583 
2584 		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
2585 			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2586 		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
2587 
2588 		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
2589 	}
2590 }
2591 
2592 static void
2593 osl_sec_dma_init_consistent(osl_t *osh)
2594 {
2595 	int i;
2596 	void *temp_va = osh->contig_base_alloc_coherent_va;
2597 	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
2598 
2599 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
2600 		osh->sec_cma_coherent[i].avail = TRUE;
2601 		osh->sec_cma_coherent[i].va = temp_va;
2602 		osh->sec_cma_coherent[i].pa = temp_pa;
2603 		temp_va = ((uint8 *)temp_va)+SEC_CMA_COHERENT_BLK;
2604 		temp_pa += SEC_CMA_COHERENT_BLK;
2605 	}
2606 }
2607 
2608 static void *
2609 osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
2610 {
2611 
2612 	void *temp_va = NULL;
2613 	ulong temp_pa = 0;
2614 	int i;
2615 
2616 	if (size > SEC_CMA_COHERENT_BLK) {
2617 		printf("%s unsupported size\n", __FUNCTION__);
2618 		return NULL;
2619 	}
2620 
2621 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
2622 		if (osh->sec_cma_coherent[i].avail == TRUE) {
2623 			temp_va = osh->sec_cma_coherent[i].va;
2624 			temp_pa = osh->sec_cma_coherent[i].pa;
2625 			osh->sec_cma_coherent[i].avail = FALSE;
2626 			break;
2627 		}
2628 	}
2629 
2630 	if (i == SEC_CMA_COHERENT_MAX)
2631 		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
2632 			temp_va, (ulong)temp_pa, size);
2633 
2634 	*pap = (unsigned long)temp_pa;
2635 	return temp_va;
2636 }
2637 
2638 static void
2639 osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
2640 {
2641 	int i = 0;
2642 
2643 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
2644 		if (osh->sec_cma_coherent[i].va == va) {
2645 			osh->sec_cma_coherent[i].avail = TRUE;
2646 			break;
2647 		}
2648 	}
2649 	if (i == SEC_CMA_COHERENT_MAX)
2650 		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
2651 			va, (ulong)pa, size);
2652 }
2653 
2654 #endif /* BCM_SECURE_DMA */
2655 
2656 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
2657 #include <linux/kallsyms.h>
2658 #include <net/sock.h>
2659 void
2660 osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
2661 {
2662 	uint32 fraction;
2663 	static void *p_tcp_wfree = NULL;
2664 
2665 	if (tsq <= 0)
2666 		return;
2667 
2668 	if (!skb->destructor || skb->destructor == sock_wfree)
2669 		return;
2670 
2671 	if (unlikely(!p_tcp_wfree)) {
2672 		char sym[KSYM_SYMBOL_LEN];
2673 		sprint_symbol(sym, (unsigned long)skb->destructor);
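		/* keep only strlen("tcp_wfree") == 9 characters for the compare below */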
2674 		sym[9] = 0;
2675 		if (!strcmp(sym, "tcp_wfree"))
2676 			p_tcp_wfree = skb->destructor;
2677 		else
2678 			return;
2679 	}
2680 
2681 	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
2682 		return;
2683 
2684 	/* subtract a portion of the skb truesize from the socket's
2685 	 * sk_wmem_alloc so that more skbs can be allocated for this
2686 	 * socket, giving a better cushion to meet the WiFi device's requirement
2687 	 */
2688 	fraction = skb->truesize * (tsq - 1) / tsq;
2689 	skb->truesize -= fraction;
2690 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
2691 	atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
2692 #else
2693 	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
2694 #endif /* LINUX_VERSION >= 4.13.0 */
2695 	skb_orphan(skb);
2696 }
2697 #endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
2698 
2699 /* timer apis */
2700 /* Note: the timer APIs are not thread-safe; the caller must provide its own locking */
2701 
2702 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
2703 void
2704 timer_cb_compat(struct timer_list *tl)
2705 {
2706 	timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
2707 	t->callback((ulong)t->arg);
2708 }
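
/*
 * Illustrative sketch, not part of the original driver: arming a legacy
 * (ulong-argument) callback through the compat shim above on v4.15+
 * kernels. Assumes timer_list_compat_t exposes the .timer, .callback and
 * .arg members used by timer_cb_compat().
 */
static inline void
example_setup_compat_timer(timer_list_compat_t *t, void (*cb)(ulong), void *arg)
{
	t->callback = cb;
	t->arg = arg;
	/* the kernel fires timer_cb_compat(), which recovers 't' and calls cb */
	timer_setup(&t->timer, timer_cb_compat, 0);
}
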
2709 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
2710 
2711 #ifdef REPORT_FATAL_TIMEOUTS
2712 osl_timer_t *
2713 osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
2714 {
2715 	osl_timer_t *t;
2716 	BCM_REFERENCE(fn);
2717 	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
2718 		printk(KERN_ERR "osl_timer_init: malloc failed for osl_timer_t\n");
2719 		return (NULL);
2720 	}
2721 	bzero(t, sizeof(osl_timer_t));
2722 	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
2723 		printk(KERN_ERR "osl_timer_init: malloc failed\n");
2724 		MFREE(NULL, t, sizeof(osl_timer_t));
2725 		return (NULL);
2726 	}
2727 	t->timer->data = (ulong)arg;
2728 	t->timer->function = (linux_timer_fn)fn;
2729 	t->set = TRUE;
2730 
2731 	init_timer(t->timer);
2732 
2733 	return (t);
2734 }
2735 
2736 void
2737 osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
2738 {
2739 
2740 	if (t == NULL) {
2741 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
2742 		return;
2743 	}
2744 	ASSERT(!t->set);
2745 
2746 	t->set = TRUE;
2747 	if (periodic) {
2748 		printf("Periodic timers are not supported by Linux timer apis\n");
2749 	}
2750 	t->timer->expires = jiffies + ms*HZ/1000;
2751 
2752 	add_timer(t->timer);
2753 
2754 	return;
2755 }
2756 
2757 void
2758 osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
2759 {
2760 
2761 	if (t == NULL) {
2762 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
2763 		return;
2764 	}
2765 	if (periodic) {
2766 		printf("Periodic timers are not supported by Linux timer apis\n");
2767 	}
2768 	t->set = TRUE;
2769 	t->timer->expires = jiffies + ms*HZ/1000;
2770 
2771 	mod_timer(t->timer, t->timer->expires);
2772 
2773 	return;
2774 }
2775 
2776 /*
2777  * Return TRUE if timer successfully deleted, FALSE if still pending
2778  */
2779 bool
2780 osl_timer_del(osl_t *osh, osl_timer_t *t)
2781 {
2782 	if (t == NULL) {
2783 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
2784 		return (FALSE);
2785 	}
2786 	if (t->set) {
2787 		t->set = FALSE;
2788 		if (t->timer) {
2789 			del_timer(t->timer);
2790 			MFREE(NULL, t->timer, sizeof(struct timer_list));
2791 		}
2792 		MFREE(NULL, t, sizeof(osl_timer_t));
2793 	}
2794 	return (TRUE);
2795 }
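
/*
 * Illustrative sketch, not part of the original driver: a one-shot 100 ms
 * timeout using the wrappers above. Periodic operation is not supported;
 * re-arm from the callback with osl_timer_update() if needed. Note that
 * osl_timer_del() also frees the timer and its handle.
 */
static void
example_timeout_cb(void *arg)
{
	printk("example timeout fired\n");
}

static void
example_arm_timeout(osl_t *osh)
{
	osl_timer_t *t = osl_timer_init(osh, "example_timeout",
		example_timeout_cb, NULL);

	if (t != NULL)
		osl_timer_add(osh, t, 100, FALSE);
}
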
2796 #endif /* REPORT_FATAL_TIMEOUTS */
2797 
2798 void
2799 osl_do_gettimeofday(struct osl_timespec *ts)
2800 {
2801 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
2802 	struct timespec curtime;
2803 #else
2804 	struct timeval curtime;
2805 #endif
2806 
2807 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
2808 	getnstimeofday(&curtime);
2809 	ts->tv_nsec = curtime.tv_nsec;
2810 	ts->tv_usec = curtime.tv_nsec / 1000;
2811 #else
2812 	do_gettimeofday(&curtime);
2813 	ts->tv_usec = curtime.tv_usec;
2814 	ts->tv_nsec = curtime.tv_usec * 1000;
2815 #endif
2816 	ts->tv_sec = curtime.tv_sec;
2817 }
2818 
2819 uint32
2820 osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts)
2821 {
2822 	uint32 diff_s, diff_us, total_diff_us;
2823 	bool pgc_g = FALSE;
2824 
2825 	diff_s = (uint32)cur_ts->tv_sec - (uint32)old_ts->tv_sec;
2826 	pgc_g = (cur_ts->tv_usec > old_ts->tv_usec) ? TRUE : FALSE;
2827 	diff_us = pgc_g ? (cur_ts->tv_usec - old_ts->tv_usec) : (old_ts->tv_usec - cur_ts->tv_usec);
2828 	total_diff_us = pgc_g ? (diff_s * 1000000 + diff_us) : (diff_s * 1000000 - diff_us);
2829 	return total_diff_us;
2830 }
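
/*
 * Worked example: cur_ts = {5 s, 200 us} and old_ts = {4 s, 999900 us}
 * gives diff_s = 1 and, since 200 <= 999900, pgc_g = FALSE and
 * diff_us = 999700, so the result is 1 * 1000000 - 999700 = 300 us.
 */
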
2831 
2832 void
2833 osl_get_monotonic_boottime(struct osl_timespec *ts)
2834 {
2835 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
2836 	struct timespec curtime;
2837 #else
2838 	struct timeval curtime;
2839 #endif
2840 
2841 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
2842 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2843 	curtime = ktime_to_timespec(ktime_get_boottime());
2844 #else
2845 	get_monotonic_boottime(&curtime);
2846 #endif
2847 	ts->tv_sec = curtime.tv_sec;
2848 	ts->tv_usec = curtime.tv_nsec / 1000;
2849 	ts->tv_nsec = curtime.tv_nsec;
2850 #else
2851 	do_gettimeofday(&curtime);
2852 	ts->tv_sec = curtime.tv_sec;
2853 	ts->tv_usec = curtime.tv_usec;
2854 	ts->tv_nsec = curtime.tv_usec * 1000;
2855 #endif
2856 }