xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/linux_osl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Linux OS Independent Layer
3  *
4  * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: linux_osl.c 697654 2017-05-04 11:59:40Z $
30  */
31 
32 #define LINUX_PORT
33 
34 #include <typedefs.h>
35 #include <bcmendian.h>
36 #include <linuxver.h>
37 #include <bcmdefs.h>
38 
39 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
40 #include <asm/cacheflush.h>
41 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
42 
43 #include <linux/random.h>
44 
45 #include <osl.h>
46 #include <bcmutils.h>
47 #include <linux/delay.h>
48 #include <linux/vmalloc.h>
49 #include <pcicfg.h>
50 
51 #if defined(BCMASSERT_LOG) && !defined(OEM_ANDROID)
52 #include <bcm_assert_log.h>
53 #endif // endif
54 
55 #ifdef BCM_SECURE_DMA
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/io.h>
59 #include <linux/printk.h>
60 #include <linux/errno.h>
61 #include <linux/mm.h>
62 #include <linux/moduleparam.h>
63 #include <asm/io.h>
64 #include <linux/skbuff.h>
65 #include <stbutils.h>
66 #include <linux/highmem.h>
67 #include <linux/dma-mapping.h>
68 #include <asm/memory.h>
69 #endif /* BCM_SECURE_DMA */
70 
71 #include <linux/fs.h>
72 
73 #if defined(STB)
74 #include <linux/spinlock.h>
75 extern spinlock_t l2x0_reg_lock;
76 #endif // endif
77 
78 #ifdef BCM_OBJECT_TRACE
79 #include <bcmutils.h>
80 #endif /* BCM_OBJECT_TRACE */
81 #include "linux_osl_priv.h"
82 
83 #define PCI_CFG_RETRY		10
84 
85 #define DUMPBUFSZ 1024
86 
87 #ifdef BCM_SECURE_DMA
88 static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
89 	bool iscache, bool isdecr);
90 static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
91 static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
92 	sec_mem_elem_t **list);
93 static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
94 	void *sec_list_base);
95 static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
96 	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
97 static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
98 static void osl_sec_dma_init_consistent(osl_t *osh);
99 static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
100 	ulong *pap);
101 static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
102 #endif /* BCM_SECURE_DMA */
103 
104 /* PCMCIA attribute space access macros */
105 
106 #ifdef CUSTOMER_HW4_DEBUG
107 uint32 g_assert_type = 1; /* by default, do not trigger a kernel panic */
108 #else
109 uint32 g_assert_type = 0; /* by default, trigger a kernel panic */
110 #endif /* CUSTOMER_HW4_DEBUG */
111 
112 module_param(g_assert_type, int, 0);
113 #ifdef	BCM_SECURE_DMA
114 #define	SECDMA_MODULE_PARAMS	0
115 #define	SECDMA_EXT_FILE	1
116 unsigned long secdma_addr = 0;
117 unsigned long secdma_addr2 = 0;
118 u32 secdma_size = 0;
119 u32 secdma_size2 = 0;
120 module_param(secdma_addr, ulong, 0);
121 module_param(secdma_size, int, 0);
122 module_param(secdma_addr2, ulong, 0);
123 module_param(secdma_size2, int, 0);
124 static int secdma_found = 0;
125 #endif /* BCM_SECURE_DMA */
126 
127 #ifdef USE_DMA_LOCK
128 static void osl_dma_lock(osl_t *osh);
129 static void osl_dma_unlock(osl_t *osh);
130 static void osl_dma_lock_init(osl_t *osh);
131 
132 #define DMA_LOCK(osh)		osl_dma_lock(osh)
133 #define DMA_UNLOCK(osh)		osl_dma_unlock(osh)
134 #define DMA_LOCK_INIT(osh)	osl_dma_lock_init(osh);
135 #else
136 #define DMA_LOCK(osh)		do { /* noop */ } while(0)
137 #define DMA_UNLOCK(osh)		do { /* noop */ } while(0)
138 #define DMA_LOCK_INIT(osh)	do { /* noop */ } while(0)
139 #endif /* USE_DMA_LOCK */
140 
141 static int16 linuxbcmerrormap[] =
142 {	0,				/* 0 */
143 	-EINVAL,		/* BCME_ERROR */
144 	-EINVAL,		/* BCME_BADARG */
145 	-EINVAL,		/* BCME_BADOPTION */
146 	-EINVAL,		/* BCME_NOTUP */
147 	-EINVAL,		/* BCME_NOTDOWN */
148 	-EINVAL,		/* BCME_NOTAP */
149 	-EINVAL,		/* BCME_NOTSTA */
150 	-EINVAL,		/* BCME_BADKEYIDX */
151 	-EINVAL,		/* BCME_RADIOOFF */
152 	-EINVAL,		/* BCME_NOTBANDLOCKED */
153 	-EINVAL, 		/* BCME_NOCLK */
154 	-EINVAL, 		/* BCME_BADRATESET */
155 	-EINVAL, 		/* BCME_BADBAND */
156 	-E2BIG,			/* BCME_BUFTOOSHORT */
157 	-E2BIG,			/* BCME_BUFTOOLONG */
158 	-EBUSY, 		/* BCME_BUSY */
159 	-EINVAL, 		/* BCME_NOTASSOCIATED */
160 	-EINVAL, 		/* BCME_BADSSIDLEN */
161 	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
162 	-EINVAL, 		/* BCME_BADCHAN */
163 	-EFAULT, 		/* BCME_BADADDR */
164 	-ENOMEM, 		/* BCME_NORESOURCE */
165 	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
166 	-EMSGSIZE,		/* BCME_BADLENGTH */
167 	-EINVAL,		/* BCME_NOTREADY */
168 	-EPERM,			/* BCME_EPERM */
169 	-ENOMEM, 		/* BCME_NOMEM */
170 	-EINVAL, 		/* BCME_ASSOCIATED */
171 	-ERANGE, 		/* BCME_RANGE */
172 	-EINVAL, 		/* BCME_NOTFOUND */
173 	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
174 	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
175 	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
176 	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
177 	-EIO,			/* BCME_SDIO_ERROR */
178 	-ENODEV,		/* BCME_DONGLE_DOWN */
179 	-EINVAL,		/* BCME_VERSION */
180 	-EIO,			/* BCME_TXFAIL */
181 	-EIO,			/* BCME_RXFAIL */
182 	-ENODEV,		/* BCME_NODEVICE */
183 	-EINVAL,		/* BCME_NMODE_DISABLED */
184 	-ENODATA,		/* BCME_NONRESIDENT */
185 	-EINVAL,		/* BCME_SCANREJECT */
186 	-EINVAL,		/* BCME_USAGE_ERROR */
187 	-EIO,     		/* BCME_IOCTL_ERROR */
188 	-EIO,			/* BCME_SERIAL_PORT_ERR */
189 	-EOPNOTSUPP,	/* BCME_DISABLED, BCME_NOTENABLED */
190 	-EIO,			/* BCME_DECERR */
191 	-EIO,			/* BCME_ENCERR */
192 	-EIO,			/* BCME_MICERR */
193 	-ERANGE,		/* BCME_REPLAY */
194 	-EINVAL,		/* BCME_IE_NOTFOUND */
195 	-EINVAL,		/* BCME_DATA_NOTFOUND */
196 	-EINVAL,        /* BCME_NOT_GC */
197 	-EINVAL,        /* BCME_PRS_REQ_FAILED */
198 	-EINVAL,        /* BCME_NO_P2P_SE */
199 	-EINVAL,        /* BCME_NOA_PND */
200 	-EINVAL,        /* BCME_FRAG_Q_FAILED */
201 	-EINVAL,        /* BCME_GET_AF_FAILED */
202 	-EINVAL,	/* BCME_MSCH_NOTREADY */
203 	-EINVAL,	/* BCME_IOV_LAST_CMD */
204 	-EINVAL,	/* BCME_MINIPMU_CAL_FAIL */
205 	-EINVAL,	/* BCME_RCAL_FAIL */
206 	-EINVAL,	/* BCME_LPF_RCCAL_FAIL */
207 	-EINVAL,	/* BCME_DACBUF_RCCAL_FAIL */
208 	-EINVAL,	/* BCME_VCOCAL_FAIL */
209 	-EINVAL,	/* BCME_BANDLOCKED */
210 	-EINVAL,	/* BCME_DNGL_DEVRESET */
211 
212 /* When a new error code is added to bcmutils.h, add the OS-specific
213  * error translation here as well
214  */
215 /* check if BCME_LAST changed since the last time this function was updated */
216 #if BCME_LAST != -68
217 #error "You need to add a OS error translation in the linuxbcmerrormap \
218 	for new error code defined in bcmutils.h"
219 #endif // endif
220 };
221 uint lmtest = FALSE;
222 
223 #ifdef DHD_MAP_LOGGING
224 #define DHD_MAP_LOG_SIZE 2048
225 
226 typedef struct dhd_map_item {
227 	dmaaddr_t pa;		/* DMA address (physical) */
228 	uint64 ts_nsec;		/* timestamp: nsec */
229 	uint32 size;		/* mapping size */
230 	uint8 rsvd[4];		/* reserved for future use */
231 } dhd_map_item_t;
232 
233 typedef struct dhd_map_record {
234 	uint32 items;		/* number of total items */
235 	uint32 idx;		/* current index of metadata */
236 	dhd_map_item_t map[0];	/* metadata storage */
237 } dhd_map_log_t;
238 
239 void
240 osl_dma_map_dump(osl_t *osh)
241 {
242 	dhd_map_log_t *map_log, *unmap_log;
243 	uint64 ts_sec, ts_usec;
244 
245 	map_log = (dhd_map_log_t *)(osh->dhd_map_log);
246 	unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
247 	osl_get_localtime(&ts_sec, &ts_usec);
248 
249 	if (map_log && unmap_log) {
250 		printk("%s: map_idx=%d unmap_idx=%d "
251 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
252 			map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
253 			(unsigned long)ts_usec);
254 		printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
255 			" dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
256 			(uint64)__virt_to_phys((ulong)(map_log->map)),
257 			(uint32)(sizeof(dhd_map_item_t) * map_log->items),
258 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
259 			(uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
260 	}
261 }
262 
263 static void *
264 osl_dma_map_log_init(uint32 item_len)
265 {
266 	dhd_map_log_t *map_log;
267 	gfp_t flags;
268 	uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
269 		(item_len * sizeof(dhd_map_item_t)));
270 
271 	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
272 	map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
273 	if (map_log) {
274 		memset(map_log, 0, alloc_size);
275 		map_log->items = item_len;
276 		map_log->idx = 0;
277 	}
278 
279 	return (void *)map_log;
280 }
281 
282 static void
283 osl_dma_map_log_deinit(osl_t *osh)
284 {
285 	if (osh->dhd_map_log) {
286 		kfree(osh->dhd_map_log);
287 		osh->dhd_map_log = NULL;
288 	}
289 
290 	if (osh->dhd_unmap_log) {
291 		kfree(osh->dhd_unmap_log);
292 		osh->dhd_unmap_log = NULL;
293 	}
294 }
295 
296 static void
297 osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
298 {
299 	dhd_map_log_t *log = (dhd_map_log_t *)handle;
300 	uint32 idx;
301 
302 	if (log == NULL) {
303 		printk("%s: log is NULL\n", __FUNCTION__);
304 		return;
305 	}
306 
307 	idx = log->idx;
308 	log->map[idx].ts_nsec = osl_localtime_ns();
309 	log->map[idx].pa = pa;
310 	log->map[idx].size = len;
311 	log->idx = (idx + 1) % log->items;
312 }
313 #endif /* DHD_MAP_LOGGING */
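
/*
 * Note on the two logs above: each is a fixed-size ring, so
 * osl_dma_map_logging() overwrites the oldest entry once idx wraps at
 * 'items', and osl_dma_map_dump() therefore reports at most the last
 * DHD_MAP_LOG_SIZE map/unmap records.
 */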
314 
315 /* translate bcmerrors into linux errors */
316 int
317 osl_error(int bcmerror)
318 {
319 	if (bcmerror > 0)
320 		bcmerror = 0;
321 	else if (bcmerror < BCME_LAST)
322 		bcmerror = BCME_ERROR;
323 
324 	/* Array bounds covered by ASSERT in osl_attach */
325 	return linuxbcmerrormap[-bcmerror];
326 }
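/*
 * Usage sketch (the caller name is illustrative, not part of this file):
 * any ioctl/attach path that must hand a status back to the kernel can
 * funnel a BCME_* code through osl_error():
 *
 *	int bcmerr = dhd_do_some_ioctl(dhd);	// hypothetical, returns BCME_*
 *	return osl_error(bcmerr);		// e.g. BCME_NOMEM -> -ENOMEM
 */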
327 osl_t *
328 osl_attach(void *pdev, uint bustype, bool pkttag)
329 {
330 	void **osl_cmn = NULL;
331 	osl_t *osh;
332 	gfp_t flags;
333 #ifdef BCM_SECURE_DMA
334 	u32 secdma_memsize;
335 #endif // endif
336 
337 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
338 	if (!(osh = kmalloc(sizeof(osl_t), flags)))
339 		return osh;
340 
341 	ASSERT(osh);
342 
343 	bzero(osh, sizeof(osl_t));
344 
345 	if (osl_cmn == NULL || *osl_cmn == NULL) {
346 		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
347 			kfree(osh);
348 			return NULL;
349 		}
350 		bzero(osh->cmn, sizeof(osl_cmn_t));
351 		if (osl_cmn)
352 			*osl_cmn = osh->cmn;
353 		atomic_set(&osh->cmn->malloced, 0);
354 		osh->cmn->dbgmem_list = NULL;
355 		spin_lock_init(&(osh->cmn->dbgmem_lock));
356 
357 		spin_lock_init(&(osh->cmn->pktalloc_lock));
358 
359 	} else {
360 		osh->cmn = *osl_cmn;
361 	}
362 	atomic_add(1, &osh->cmn->refcount);
363 
364 	bcm_object_trace_init();
365 
366 	/* Check that error map has the right number of entries in it */
367 	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
368 
369 	osh->failed = 0;
370 	osh->pdev = pdev;
371 	osh->pub.pkttag = pkttag;
372 	osh->bustype = bustype;
373 	osh->magic = OS_HANDLE_MAGIC;
374 #ifdef BCM_SECURE_DMA
375 
376 	if ((secdma_addr != 0) && (secdma_size != 0)) {
377 		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
378 		if (secdma_found == 0) {
379 			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
380 			secdma_memsize = secdma_size;
381 		} else if (secdma_found == 1) {
382 			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
383 			secdma_memsize = secdma_size2;
384 		} else {
385 			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
386 			kfree(osh);
387 			return NULL;
388 		}
389 		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
390 		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
391 		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
392 			(unsigned int)osh->contig_base_alloc);
393 		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
394 	}
395 	else if (stbpriv_init(osh) == 0) {
396 		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
397 		if (secdma_found == 0) {
398 			osh->contig_base_alloc =
399 				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
400 			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
401 		} else if (secdma_found == 1) {
402 			osh->contig_base_alloc =
403 				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
404 			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
405 		} else {
406 			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
407 			kfree(osh);
408 			return NULL;
409 		}
410 		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
411 		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
412 		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
413 			(unsigned int)osh->contig_base_alloc);
414 		osh->stb_ext_params = SECDMA_EXT_FILE;
415 	}
416 	else {
417 		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
418 		kfree(osh);
419 		return NULL;
420 	}
421 	secdma_found++;
422 	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
423 		phys_to_page((u32)osh->contig_base_alloc),
424 		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);
425 
426 	if (osh->contig_base_alloc_coherent_va == NULL) {
427 		if (osh->cmn)
428 			kfree(osh->cmn);
429 		kfree(osh);
430 		return NULL;
431 	}
432 	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
433 	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
434 	osl_sec_dma_init_consistent(osh);
435 
436 	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;
437 
438 	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
439 		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
440 	if (osh->contig_base_alloc_va == NULL) {
441 		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
442 		if (osh->cmn)
443 			kfree(osh->cmn);
444 		kfree(osh);
445 		return NULL;
446 	}
447 	osh->contig_base_va = osh->contig_base_alloc_va;
448 
449 	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
450 		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
451 		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
452 		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
453 		if (osh->cmn)
454 			kfree(osh->cmn);
455 		kfree(osh);
456 		return NULL;
457 	}
458 	osh->sec_list_base_4096 = osh->sec_list_4096;
459 
460 #endif /* BCM_SECURE_DMA */
461 
462 	switch (bustype) {
463 		case PCI_BUS:
464 		case SI_BUS:
465 		case PCMCIA_BUS:
466 			osh->pub.mmbus = TRUE;
467 			break;
468 		case JTAG_BUS:
469 		case SDIO_BUS:
470 		case USB_BUS:
471 		case SPI_BUS:
472 		case RPC_BUS:
473 			osh->pub.mmbus = FALSE;
474 			break;
475 		default:
476 			ASSERT(FALSE);
477 			break;
478 	}
479 
480 	DMA_LOCK_INIT(osh);
481 
482 #ifdef DHD_MAP_LOGGING
483 	osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
484 	if (osh->dhd_map_log == NULL) {
485 		printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
486 	}
487 
488 	osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
489 	if (osh->dhd_unmap_log == NULL) {
490 		printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
491 	}
492 #endif /* DHD_MAP_LOGGING */
493 
494 	return osh;
495 }
496 
497 void osl_set_bus_handle(osl_t *osh, void *bus_handle)
498 {
499 	osh->bus_handle = bus_handle;
500 }
501 
502 void* osl_get_bus_handle(osl_t *osh)
503 {
504 	return osh->bus_handle;
505 }
506 
507 #if defined(BCM_BACKPLANE_TIMEOUT)
508 void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
509 {
510 	if (osh) {
511 		osh->bpt_cb = (bpt_cb_fn)bpt_cb;
512 		osh->sih = bpt_ctx;
513 	}
514 }
515 #endif	/* BCM_BACKPLANE_TIMEOUT */
516 
517 void
518 osl_detach(osl_t *osh)
519 {
520 	if (osh == NULL)
521 		return;
522 
523 #ifdef BCM_SECURE_DMA
524 	if (osh->stb_ext_params == SECDMA_EXT_FILE)
525 		stbpriv_exit(osh);
526 	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
527 	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
528 	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
529 	secdma_found--;
530 #endif /* BCM_SECURE_DMA */
531 
532 	bcm_object_trace_deinit();
533 
534 #ifdef DHD_MAP_LOGGING
535 	/* osl_dma_map_log_deinit() takes the osl handle and frees both logs;
536 	 * passing the log pointers themselves would kfree through garbage */
	osl_dma_map_log_deinit(osh);
537 #endif /* DHD_MAP_LOGGING */
538 
539 	ASSERT(osh->magic == OS_HANDLE_MAGIC);
540 	atomic_sub(1, &osh->cmn->refcount);
541 	if (atomic_read(&osh->cmn->refcount) == 0) {
542 			kfree(osh->cmn);
543 	}
544 	kfree(osh);
545 }
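/*
 * Lifecycle sketch (hypothetical probe/remove path; error handling elided):
 *
 *	osl_t *osh = osl_attach(pdev, PCI_BUS, TRUE);	// pkttag enabled
 *	if (osh == NULL)
 *		return -ENOMEM;
 *	...
 *	osl_detach(osh);	// frees osh->cmn once its refcount drops to 0
 */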
546 
547 /* APIs to set/get specific quirks in OSL layer */
548 void BCMFASTPATH
549 osl_flag_set(osl_t *osh, uint32 mask)
550 {
551 	osh->flags |= mask;
552 }
553 
554 void
555 osl_flag_clr(osl_t *osh, uint32 mask)
556 {
557 	osh->flags &= ~mask;
558 }
559 
560 #if defined(STB)
561 inline bool BCMFASTPATH
562 #else
563 bool
564 #endif // endif
565 osl_is_flag_set(osl_t *osh, uint32 mask)
566 {
567 	return (osh->flags & mask);
568 }
569 
570 #if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
571 	defined(STB_SOC_WIFI)
572 
573 inline int BCMFASTPATH
574 osl_arch_is_coherent(void)
575 {
576 	return 0;
577 }
578 
579 inline int BCMFASTPATH
580 osl_acp_war_enab(void)
581 {
582 	return 0;
583 }
584 
585 inline void BCMFASTPATH
586 osl_cache_flush(void *va, uint size)
587 {
588 
589 	if (size > 0)
590 #ifdef STB_SOC_WIFI
591 		dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
592 #else /* STB_SOC_WIFI */
593 		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
594 			DMA_TO_DEVICE);
595 #endif /* STB_SOC_WIFI */
596 }
597 
598 inline void BCMFASTPATH
599 osl_cache_inv(void *va, uint size)
600 {
601 
602 #ifdef STB_SOC_WIFI
603 	dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
604 #else /* STB_SOC_WIFI */
605 	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
606 #endif /* STB_SOC_WIFI */
607 }
608 
609 inline void BCMFASTPATH
610 osl_prefetch(const void *ptr)
611 {
612 #if !defined(STB_SOC_WIFI)
613 	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
614 #endif // endif
615 }
616 
617 #endif // endif
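
/*
 * Rule of thumb for the cache helpers above (illustrative pairing; these
 * helpers only exist in the non-coherent ARMv7A / STB_SOC_WIFI builds):
 * flush before the device reads a buffer the CPU wrote, invalidate before
 * the CPU reads a buffer the device wrote:
 *
 *	osl_cache_flush(txbuf, txlen);	// CPU -> device (DMA_TO_DEVICE)
 *	osl_cache_inv(rxbuf, rxlen);	// device -> CPU (DMA_FROM_DEVICE)
 */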
618 
619 uint32
620 osl_pci_read_config(osl_t *osh, uint offset, uint size)
621 {
622 	uint val = 0;
623 	uint retry = PCI_CFG_RETRY;
624 
625 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
626 
627 	/* only 4byte access supported */
628 	ASSERT(size == 4);
629 
630 	do {
631 		pci_read_config_dword(osh->pdev, offset, &val);
632 		if (val != 0xffffffff)
633 			break;
634 	} while (retry--);
635 
636 	return (val);
637 }
638 
639 void
640 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
641 {
642 	uint retry = PCI_CFG_RETRY;
643 
644 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
645 
646 	/* only 4byte access supported */
647 	ASSERT(size == 4);
648 
649 	do {
650 		pci_write_config_dword(osh->pdev, offset, val);
651 		if (offset != PCI_BAR0_WIN)
652 			break;
653 		if (osl_pci_read_config(osh, offset, size) == val)
654 			break;
655 	} while (retry--);
656 
657 }
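/*
 * Example (illustrative; region_addr is a stand-in): PCI_BAR0_WIN is the one
 * offset the retry loop above verifies by read-back, since a silently dropped
 * window update would redirect all subsequent BAR0 accesses:
 *
 *	osl_pci_write_config(osh, PCI_BAR0_WIN, sizeof(uint32), region_addr);
 */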
658 
659 /* return bus # for the pci device pointed by osh->pdev */
660 uint
661 osl_pci_bus(osl_t *osh)
662 {
663 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
664 
665 #if defined(__ARM_ARCH_7A__)
666 	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
667 #else
668 	return ((struct pci_dev *)osh->pdev)->bus->number;
669 #endif // endif
670 }
671 
672 /* return slot # for the pci device pointed by osh->pdev */
673 uint
674 osl_pci_slot(osl_t *osh)
675 {
676 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
677 
678 #if defined(__ARM_ARCH_7A__)
679 	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
680 #else
681 	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
682 #endif // endif
683 }
684 
685 /* return domain # for the pci device pointed by osh->pdev */
686 uint
687 osl_pcie_domain(osl_t *osh)
688 {
689 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
690 
691 	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
692 }
693 
694 /* return bus # for the pci device pointed by osh->pdev */
695 uint
696 osl_pcie_bus(osl_t *osh)
697 {
698 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
699 
700 	return ((struct pci_dev *)osh->pdev)->bus->number;
701 }
702 
703 /* return the pci device pointed by osh->pdev */
704 struct pci_dev *
705 osl_pci_device(osl_t *osh)
706 {
707 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
708 
709 	return osh->pdev;
710 }
711 
712 static void
713 osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
714 {
715 }
716 
717 void
718 osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
719 {
720 	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
721 }
722 
723 void
724 osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
725 {
726 	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
727 }
728 
729 void *
730 osl_malloc(osl_t *osh, uint size)
731 {
732 	void *addr;
733 	gfp_t flags;
734 
735 	/* only ASSERT if osh is defined */
736 	if (osh)
737 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
738 #ifdef CONFIG_DHD_USE_STATIC_BUF
739 	if (bcm_static_buf)
740 	{
741 		unsigned long irq_flags;
742 		int i = 0;
743 		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
744 		{
745 			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);
746 
747 			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
748 			{
749 				if (bcm_static_buf->buf_use[i] == 0)
750 					break;
751 			}
752 
753 			if (i == STATIC_BUF_MAX_NUM)
754 			{
755 				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
756 				printk("all static buff in use!\n");
757 				goto original;
758 			}
759 
760 			bcm_static_buf->buf_use[i] = 1;
761 			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
762 
763 			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
764 			if (osh)
765 				atomic_add(size, &osh->cmn->malloced);
766 
767 			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
768 		}
769 	}
770 original:
771 #endif /* CONFIG_DHD_USE_STATIC_BUF */
772 
773 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
774 #if defined(DHD_USE_KVMALLOC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
775 	if ((addr = kvmalloc(size, flags)) == NULL) {
776 #else
777 	if ((addr = kmalloc(size, flags)) == NULL) {
778 #endif // endif
779 		if (osh)
780 			osh->failed++;
781 		return (NULL);
782 	}
783 	if (osh && osh->cmn)
784 		atomic_add(size, &osh->cmn->malloced);
785 
786 	return (addr);
787 }
788 
789 void *
790 osl_mallocz(osl_t *osh, uint size)
791 {
792 	void *ptr;
793 
794 	ptr = osl_malloc(osh, size);
795 
796 	if (ptr != NULL) {
797 		bzero(ptr, size);
798 	}
799 
800 	return ptr;
801 }
802 
803 void
804 osl_mfree(osl_t *osh, void *addr, uint size)
805 {
806 #ifdef CONFIG_DHD_USE_STATIC_BUF
807 	unsigned long flags;
808 
809 	if (bcm_static_buf)
810 	{
811 		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
812 			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
813 		{
814 			int buf_idx = 0;
815 
816 			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
817 
818 			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
819 			bcm_static_buf->buf_use[buf_idx] = 0;
820 			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);
821 
822 			if (osh && osh->cmn) {
823 				ASSERT(osh->magic == OS_HANDLE_MAGIC);
824 				atomic_sub(size, &osh->cmn->malloced);
825 			}
826 			return;
827 		}
828 	}
829 #endif /* CONFIG_DHD_USE_STATIC_BUF */
830 	if (osh && osh->cmn) {
831 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
832 
833 		ASSERT(size <= osl_malloced(osh));
834 
835 		atomic_sub(size, &osh->cmn->malloced);
836 	}
837 #if defined(DHD_USE_KVMALLOC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
838 	kvfree(addr);
839 #else
840 	kfree(addr);
841 #endif // endif
842 }
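/*
 * Allocation sketch (illustrative): callers must hand the original size back
 * to osl_mfree(), because the accounting in osh->cmn->malloced subtracts it:
 *
 *	void *buf = osl_malloc(osh, len);
 *	if (buf != NULL) {
 *		...
 *		osl_mfree(osh, buf, len);
 *	}
 */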
843 
844 void *
845 osl_vmalloc(osl_t *osh, uint size)
846 {
847 	void *addr;
848 
849 	/* only ASSERT if osh is defined */
850 	if (osh)
851 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
852 	if ((addr = vmalloc(size)) == NULL) {
853 		if (osh)
854 			osh->failed++;
855 		return (NULL);
856 	}
857 	if (osh && osh->cmn)
858 		atomic_add(size, &osh->cmn->malloced);
859 
860 	return (addr);
861 }
862 
863 void *
864 osl_vmallocz(osl_t *osh, uint size)
865 {
866 	void *ptr;
867 
868 	ptr = osl_vmalloc(osh, size);
869 
870 	if (ptr != NULL) {
871 		bzero(ptr, size);
872 	}
873 
874 	return ptr;
875 }
876 
877 void
878 osl_vmfree(osl_t *osh, void *addr, uint size)
879 {
880 	if (osh && osh->cmn) {
881 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
882 
883 		ASSERT(size <= osl_malloced(osh));
884 
885 		atomic_sub(size, &osh->cmn->malloced);
886 	}
887 	vfree(addr);
888 }
889 
890 uint
891 osl_check_memleak(osl_t *osh)
892 {
893 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
894 	if (atomic_read(&osh->cmn->refcount) == 1)
895 		return (atomic_read(&osh->cmn->malloced));
896 	else
897 		return 0;
898 }
899 
900 uint
901 osl_malloced(osl_t *osh)
902 {
903 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
904 	return (atomic_read(&osh->cmn->malloced));
905 }
906 
907 uint
908 osl_malloc_failed(osl_t *osh)
909 {
910 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
911 	return (osh->failed);
912 }
913 
914 uint
915 osl_dma_consistent_align(void)
916 {
917 	return (PAGE_SIZE);
918 }
919 
920 void*
921 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
922 {
923 	void *va;
924 	uint16 align = (1 << align_bits);
925 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
926 
927 	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
928 		size += align;
929 	*alloced = size;
930 
931 #ifndef	BCM_SECURE_DMA
932 #if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
933 	defined(STB_SOC_WIFI)
934 	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
935 	if (va)
936 		*pap = (ulong)__virt_to_phys((ulong)va);
937 #else
938 	{
939 		dma_addr_t pap_lin;
940 		struct pci_dev *hwdev = osh->pdev;
941 		gfp_t flags;
942 #ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
943 		flags = GFP_ATOMIC;
944 #else
945 		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
946 #endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
947 		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
948 #ifdef BCMDMA64OSL
949 		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
950 		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
951 #else
952 		*pap = (dmaaddr_t)pap_lin;
953 #endif /* BCMDMA64OSL */
954 	}
955 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
956 #else
957 	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
958 #endif /* BCM_SECURE_DMA */
959 	return va;
960 }
961 
962 void
963 osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
964 {
965 #ifdef BCMDMA64OSL
966 	dma_addr_t paddr;
967 #endif /* BCMDMA64OSL */
968 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
969 
970 #ifndef BCM_SECURE_DMA
971 #if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
972 	defined(STB_SOC_WIFI)
973 	kfree(va);
974 #else
975 #ifdef BCMDMA64OSL
976 	PHYSADDRTOULONG(pa, paddr);
977 	pci_free_consistent(osh->pdev, size, va, paddr);
978 #else
979 	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
980 #endif /* BCMDMA64OSL */
981 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
982 #else
983 	osl_sec_dma_free_consistent(osh, va, size, pa);
984 #endif /* BCM_SECURE_DMA */
985 }
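/*
 * Usage sketch (illustrative): keep the returned 'alloced' size and pa pair
 * for the matching free, since the OSL keeps no per-allocation record:
 *
 *	uint alloced = 0;
 *	dmaaddr_t pa;
 *	// align_bits of 12 requests 4 KB alignment (1 << 12)
 *	void *ring = osl_dma_alloc_consistent(osh, ring_sz, 12, &alloced, &pa);
 *	...
 *	osl_dma_free_consistent(osh, ring, alloced, pa);
 */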
986 
987 void *
988 osl_virt_to_phys(void *va)
989 {
990 	return (void *)(uintptr)virt_to_phys(va);
991 }
992 
993 #include <asm/cacheflush.h>
994 void BCMFASTPATH
995 osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
996 {
997 	return;
998 }
999 
1000 dmaaddr_t BCMFASTPATH
1001 osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
1002 {
1003 	int dir;
1004 	dmaaddr_t ret_addr;
1005 	dma_addr_t map_addr;
1006 	int ret;
1007 
1008 	DMA_LOCK(osh);
1009 
1010 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1011 	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1012 
1013 #ifdef STB_SOC_WIFI
1014 #if (__LINUX_ARM_ARCH__ == 8)
1015 	/* need to flush or invalidate the cache here */
1016 	if (dir == DMA_TX) { /* to device */
1017 		osl_cache_flush(va, size);
1018 	} else if (dir == DMA_RX) { /* from device */
1019 		osl_cache_inv(va, size);
1020 	} else { /* both */
1021 		osl_cache_flush(va, size);
1022 		osl_cache_inv(va, size);
1023 	}
1024 	DMA_UNLOCK(osh);
1025 	return virt_to_phys(va);
1026 #else /* (__LINUX_ARM_ARCH__ == 8) */
1027 	map_addr = dma_map_single(osh->pdev, va, size, dir);
1028 	DMA_UNLOCK(osh);
1029 	return map_addr;
1030 #endif /* (__LINUX_ARM_ARCH__ == 8) */
1031 #else /* ! STB_SOC_WIFI */
1032 	map_addr = pci_map_single(osh->pdev, va, size, dir);
1033 #endif	/* ! STB_SOC_WIFI */
1034 
1035 	ret = pci_dma_mapping_error(osh->pdev, map_addr);
1036 
1037 	if (ret) {
1038 		printk("%s: Failed to map memory\n", __FUNCTION__);
1039 		PHYSADDRLOSET(ret_addr, 0);
1040 		PHYSADDRHISET(ret_addr, 0);
1041 	} else {
1042 		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
1043 		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
1044 	}
1045 
1046 #ifdef DHD_MAP_LOGGING
1047 	osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
1048 #endif /* DHD_MAP_LOGGING */
1049 
1050 	DMA_UNLOCK(osh);
1051 
1052 	return ret_addr;
1053 }
1054 
1055 void BCMFASTPATH
1056 osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
1057 {
1058 	int dir;
1059 #ifdef BCMDMA64OSL
1060 	dma_addr_t paddr;
1061 #endif /* BCMDMA64OSL */
1062 
1063 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1064 
1065 	DMA_LOCK(osh);
1066 
1067 	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1068 
1069 #ifdef DHD_MAP_LOGGING
1070 	osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
1071 #endif /* DHD_MAP_LOGGING */
1072 
1073 #ifdef BCMDMA64OSL
1074 	PHYSADDRTOULONG(pa, paddr);
1075 	pci_unmap_single(osh->pdev, paddr, size, dir);
1076 #else /* BCMDMA64OSL */
1077 
1078 #ifdef STB_SOC_WIFI
1079 #if (__LINUX_ARM_ARCH__ == 8)
1080 	if (dir == DMA_TX) { /* to device */
1081 		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
1082 	} else if (dir == DMA_RX) { /* from device */
1083 		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
1084 	} else { /* both */
1085 		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
1086 		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
1087 	}
1088 #else /* (__LINUX_ARM_ARCH__ == 8) */
1089 	dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
1090 #endif /* (__LINUX_ARM_ARCH__ == 8) */
1091 #else /* STB_SOC_WIFI */
1092 	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
1093 #endif /* STB_SOC_WIFI */
1094 
1095 #endif /* BCMDMA64OSL */
1096 
1097 	DMA_UNLOCK(osh);
1098 }
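/*
 * Streaming-DMA sketch (illustrative): every osl_dma_map() must be balanced
 * by an osl_dma_unmap() with the same size and direction:
 *
 *	dmaaddr_t pa = osl_dma_map(osh, skb->data, len, DMA_TX, NULL, NULL);
 *	// a zeroed pa (both halves, see the error path above) signals failure
 *	...
 *	osl_dma_unmap(osh, pa, len, DMA_TX);
 */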
1099 
1100 /* OSL function for CPU relax */
1101 inline void BCMFASTPATH
1102 osl_cpu_relax(void)
1103 {
1104 	cpu_relax();
1105 }
1106 
1107 extern void osl_preempt_disable(osl_t *osh)
1108 {
1109 	preempt_disable();
1110 }
1111 
1112 extern void osl_preempt_enable(osl_t *osh)
1113 {
1114 	preempt_enable();
1115 }
1116 
1117 #if defined(BCMASSERT_LOG)
1118 void
1119 osl_assert(const char *exp, const char *file, int line)
1120 {
1121 	char tempbuf[256];
1122 	const char *basename;
1123 
1124 	basename = strrchr(file, '/');
1125 	/* skip the '/' */
1126 	if (basename)
1127 		basename++;
1128 
1129 	if (!basename)
1130 		basename = file;
1131 
1132 #ifdef BCMASSERT_LOG
1133 	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
1134 		exp, basename, line);
1135 #ifndef OEM_ANDROID
1136 	bcm_assert_log(tempbuf);
1137 #endif /* OEM_ANDROID */
1138 #endif /* BCMASSERT_LOG */
1139 
1140 	switch (g_assert_type) {
1141 	case 0:
1142 		panic("%s", tempbuf);
1143 		break;
1144 	case 1:
1145 		/* fall through */
1146 	case 3:
1147 		printk("%s", tempbuf);
1148 		break;
1149 	case 2:
1150 		printk("%s", tempbuf);
1151 		BUG();
1152 		break;
1153 	default:
1154 		break;
1155 	}
1156 }
1157 #endif // endif
1158 void
1159 osl_delay(uint usec)
1160 {
1161 	uint d;
1162 
1163 	while (usec > 0) {
1164 		d = MIN(usec, 1000);
1165 		udelay(d);
1166 		usec -= d;
1167 	}
1168 }
1169 
1170 void
1171 osl_sleep(uint ms)
1172 {
1173 	if (ms < 20)
1174 		usleep_range(ms*1000, ms*1000 + 1000);
1175 	else
1176 		msleep(ms);
1177 }
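/* The 20 ms split above follows the kernel's timer guidance: usleep_range()
 * is hrtimer-backed and precise for short waits, while msleep() is jiffy-based
 * and can overshoot badly for small values.
 */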
1178 
1179 uint64
1180 osl_sysuptime_us(void)
1181 {
1182 	struct timespec64 ts;
1183 	uint64 usec;
1184 
1185 	ktime_get_real_ts64(&ts);
1186 	/* ts.tv_nsec holds the sub-second remainder, in nanoseconds */
1187 	usec = (uint64)ts.tv_sec * 1000000ul + (ts.tv_nsec / NSEC_PER_USEC);
1188 	return usec;
1189 }
1190 
1191 uint64
1192 osl_localtime_ns(void)
1193 {
1194 	uint64 ts_nsec = 0;
1195 
1196 	ts_nsec = local_clock();
1197 
1198 	return ts_nsec;
1199 }
1200 
1201 void
1202 osl_get_localtime(uint64 *sec, uint64 *usec)
1203 {
1204 	uint64 ts_nsec = 0;
1205 	unsigned long rem_nsec = 0;
1206 
1207 	ts_nsec = local_clock();
1208 	rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
1209 	*sec = (uint64)ts_nsec;
1210 	*usec = (uint64)(rem_nsec / NSEC_PER_USEC);
1211 }
1212 
1213 uint64
1214 osl_systztime_us(void)
1215 {
1216 	struct timespec64 ts;
1217 	uint64 tzusec;
1218 
1219 	ktime_get_real_ts64(&ts);
1220 	/* apply timezone */
1221 	tzusec = (uint64)((ts.tv_sec - (sys_tz.tz_minuteswest * 60)) *
1222 		USEC_PER_SEC);
1223 	tzusec += ts.tv_nsec / NSEC_PER_USEC;
1224 
1225 	return tzusec;
1226 }
1227 
1228 /*
1229  * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1230  */
1231 
1232 /*
1233  * BINOSL selects the slightly slower function-call-based binary compatible osl.
1234  */
1235 
1236 uint32
1237 osl_rand(void)
1238 {
1239 	uint32 rand;
1240 
1241 	get_random_bytes(&rand, sizeof(rand));
1242 
1243 	return rand;
1244 }
1245 
1246 /* Linux Kernel: File Operations: start */
1247 void *
1248 osl_os_open_image(char *filename)
1249 {
1250 	struct file *fp;
1251 
1252 	fp = filp_open(filename, O_RDONLY, 0);
1253 	/*
1254 	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1255 	 * Alternative:
1256 	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1257 	 * ???
1258 	 */
1259 	 if (IS_ERR(fp))
1260 		 fp = NULL;
1261 
1262 	 return fp;
1263 }
1264 
1265 int
1266 osl_os_get_image_block(char *buf, int len, void *image)
1267 {
1268 	struct file *fp = (struct file *)image;
1269 	int rdlen;
1270 
1271 	if (!image)
1272 		return 0;
1273 
1274 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1275 	/* kernel_read() on >= 4.14 already advances f_pos through the pos
	 * pointer, so the position must not be bumped a second time here
	 */
	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
1276 #else
1277 	rdlen = kernel_read(fp, fp->f_pos, buf, len);
	/* the legacy kernel_read() does not move f_pos; advance it manually */
	if (rdlen > 0)
		fp->f_pos += rdlen;
1278 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
1282 
1283 	return rdlen;
1284 }
1285 
1286 void
1287 osl_os_close_image(void *image)
1288 {
1289 	if (image)
1290 		filp_close((struct file *)image, NULL);
1291 }
1292 
1293 int
1294 osl_os_image_size(void *image)
1295 {
1296 	int len = 0, curroffset;
1297 
1298 	if (image) {
1299 		/* store the current offset */
1300 		curroffset = generic_file_llseek(image, 0, 1);
1301 		/* goto end of file to get length */
1302 		len = generic_file_llseek(image, 0, 2);
1303 		/* restore back the offset */
1304 		generic_file_llseek(image, curroffset, 0);
1305 	}
1306 	return len;
1307 }
1308 
1309 /* Linux Kernel: File Operations: end */
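
/*
 * File-access sketch (illustrative; the firmware path, buf, blksz and n are
 * hypothetical): open, size, read block by block, close:
 *
 *	void *img = osl_os_open_image("/vendor/firmware/fw.bin");
 *	if (img != NULL) {
 *		int left = osl_os_image_size(img), n;
 *		while (left > 0 && (n = osl_os_get_image_block(buf, blksz, img)) > 0)
 *			left -= n;	// f_pos advances inside the helper
 *		osl_os_close_image(img);
 *	}
 */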
1310 
1311 #if (defined(STB) && defined(__arm__))
1312 inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
1313 {
1314 	unsigned long flags = 0;
1315 	int pci_access = 0;
1316 	int acp_war_enab = ACP_WAR_ENAB();
1317 
1318 	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
1319 		pci_access = 1;
1320 
1321 	if (pci_access && acp_war_enab)
1322 		spin_lock_irqsave(&l2x0_reg_lock, flags);
1323 
1324 	switch (size) {
1325 	case sizeof(uint8):
1326 		*(volatile uint8*)v = readb((volatile uint8*)(addr));
1327 		break;
1328 	case sizeof(uint16):
1329 		*(volatile uint16*)v = readw((volatile uint16*)(addr));
1330 		break;
1331 	case sizeof(uint32):
1332 		*(volatile uint32*)v = readl((volatile uint32*)(addr));
1333 		break;
1334 	case sizeof(uint64):
1335 		*(volatile uint64*)v = *((volatile uint64*)(addr));
1336 		break;
1337 	}
1338 
1339 	if (pci_access && acp_war_enab)
1340 		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
1341 }
1342 #endif // endif
1343 
1344 #if defined(BCM_BACKPLANE_TIMEOUT)
1345 inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
1346 {
1347 	bool poll_timeout = FALSE;
1348 	static int in_si_clear = FALSE;
1349 
1350 	switch (size) {
1351 	case sizeof(uint8):
1352 		*(volatile uint8*)v = readb((volatile uint8*)(addr));
1353 		if (*(volatile uint8*)v == 0xff)
1354 			poll_timeout = TRUE;
1355 		break;
1356 	case sizeof(uint16):
1357 		*(volatile uint16*)v = readw((volatile uint16*)(addr));
1358 		if (*(volatile uint16*)v == 0xffff)
1359 			poll_timeout = TRUE;
1360 		break;
1361 	case sizeof(uint32):
1362 		*(volatile uint32*)v = readl((volatile uint32*)(addr));
1363 		if (*(volatile uint32*)v == 0xffffffff)
1364 			poll_timeout = TRUE;
1365 		break;
1366 	case sizeof(uint64):
1367 		*(volatile uint64*)v = *((volatile uint64*)(addr));
1368 		if (*(volatile uint64*)v == 0xffffffffffffffff)
1369 			poll_timeout = TRUE;
1370 		break;
1371 	}
1372 
1373 	if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
1374 		in_si_clear = TRUE;
1375 		osh->bpt_cb((void *)osh->sih, (void *)addr);
1376 		in_si_clear = FALSE;
1377 	}
1378 }
1379 #endif /* BCM_BACKPLANE_TIMEOUT */
1380 
1381 #ifdef BCM_SECURE_DMA
1382 static void *
1383 osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
1384 {
1385 
1386 	struct page **map;
1387 	int order, i;
1388 	void *addr = NULL;
1389 
1390 	size = PAGE_ALIGN(size);
1391 	order = get_order(size);
1392 
1393 	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
1394 
1395 	if (map == NULL)
1396 		return NULL;
1397 
1398 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
1399 		map[i] = page + i;
1400 
1401 	if (iscache) {
1402 		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
1403 		if (isdecr) {
1404 			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
1405 		}
1406 	} else {
1407 
1408 #if defined(__ARM_ARCH_7A__)
1409 		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
1410 			pgprot_noncached(__pgprot(PAGE_KERNEL)));
1411 #endif // endif
1412 		if (isdecr) {
1413 			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
1414 		}
1415 	}
1416 
1417 	kfree(map);
1418 	return (void *)addr;
1419 }
1420 
1421 static void
1422 osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
1423 {
1424 	vunmap(contig_base_va);
1425 }
1426 
1427 static int
1428 osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
1429 {
1430 	int i;
1431 	int ret = BCME_OK;
1432 	sec_mem_elem_t *sec_mem_elem;
1433 
1434 	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
1435 
1436 		*list = sec_mem_elem;
1437 		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
1438 		for (i = 0; i < max-1; i++) {
1439 			sec_mem_elem->next = (sec_mem_elem + 1);
1440 			sec_mem_elem->size = mbsize;
1441 			sec_mem_elem->pa_cma = osh->contig_base_alloc;
1442 			sec_mem_elem->vac = osh->contig_base_alloc_va;
1443 
1444 			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1445 			osh->contig_base_alloc += mbsize;
1446 			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
1447 
1448 			sec_mem_elem = sec_mem_elem + 1;
1449 		}
1450 		sec_mem_elem->next = NULL;
1451 		sec_mem_elem->size = mbsize;
1452 		sec_mem_elem->pa_cma = osh->contig_base_alloc;
1453 		sec_mem_elem->vac = osh->contig_base_alloc_va;
1454 
1455 		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1456 		osh->contig_base_alloc += mbsize;
1457 		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
1458 
1459 	} else {
1460 		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
1461 		ret = BCME_ERROR;
1462 	}
1463 	return ret;
1464 }
1465 
1466 static void
1467 osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
1468 {
1469 	if (sec_list_base)
1470 		kfree(sec_list_base);
1471 }
1472 
1473 static sec_mem_elem_t * BCMFASTPATH
1474 osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
1475 	struct sec_cma_info *ptr_cma_info, uint offset)
1476 {
1477 	sec_mem_elem_t *sec_mem_elem = NULL;
1478 
1479 	ASSERT(osh->sec_list_4096);
1480 	sec_mem_elem = osh->sec_list_4096;
1481 	osh->sec_list_4096 = sec_mem_elem->next;
1482 
1483 	sec_mem_elem->next = NULL;
1484 
1485 	if (ptr_cma_info->sec_alloc_list_tail) {
1486 		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
1487 		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
1488 	}
1489 	else {
1490 		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
1491 		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
1492 		ptr_cma_info->sec_alloc_list = sec_mem_elem;
1493 		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
1494 	}
1495 	return sec_mem_elem;
1496 }
1497 
1498 static void BCMFASTPATH
1499 osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
1500 {
1501 	sec_mem_elem->dma_handle = 0x0;
1502 	sec_mem_elem->va = NULL;
1503 	sec_mem_elem->next = osh->sec_list_4096;
1504 	osh->sec_list_4096 = sec_mem_elem;
1505 }
1506 
1507 static sec_mem_elem_t * BCMFASTPATH
1508 osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
1509 {
1510 	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1511 	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
1512 
1513 	if (sec_mem_elem->dma_handle == dma_handle) {
1514 
1515 		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1516 
1517 		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
1518 			ptr_cma_info->sec_alloc_list_tail = NULL;
1519 			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
1520 		}
1521 
1522 		return sec_mem_elem;
1523 	}
1524 	sec_mem_elem = sec_mem_elem->next;
1525 
1526 	while (sec_mem_elem != NULL) {
1527 
1528 		if (sec_mem_elem->dma_handle == dma_handle) {
1529 
1530 			sec_prv_elem->next = sec_mem_elem->next;
1531 			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
1532 				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
1533 
1534 			return sec_mem_elem;
1535 		}
1536 		sec_prv_elem = sec_mem_elem;
1537 		sec_mem_elem = sec_mem_elem->next;
1538 	}
1539 	return NULL;
1540 }
1541 
1542 static sec_mem_elem_t *
1543 osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
1544 {
1545 	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1546 
1547 	if (sec_mem_elem) {
1548 
1549 		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1550 
1551 		if (ptr_cma_info->sec_alloc_list == NULL)
1552 			ptr_cma_info->sec_alloc_list_tail = NULL;
1553 
1554 		return sec_mem_elem;
1555 
1556 	} else
1557 		return NULL;
1558 }
1559 
1560 static void * BCMFASTPATH
1561 osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
1562 {
1563 	return ptr_cma_info->sec_alloc_list_tail;
1564 }
1565 
1566 dma_addr_t BCMFASTPATH
1567 osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
1568 	hnddma_seg_map_t *dmah, void *ptr_cma_info)
1569 {
1570 	sec_mem_elem_t *sec_mem_elem;
1571 	struct page *pa_cma_page;
1572 	uint loffset;
1573 	void *vaorig = ((uint8 *)va + size);
1574 	dma_addr_t dma_handle = 0x0;
1575 	/* packet will be the one added with osl_sec_dma_map() just before this call */
1576 
1577 	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);
1578 
1579 	if (sec_mem_elem && sec_mem_elem->va == vaorig) {
1580 
1581 		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1582 		loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
1583 
1584 		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
1585 			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1586 
1587 	} else {
1588 		printf("%s: error orig va not found va = 0x%p \n",
1589 			__FUNCTION__, vaorig);
1590 	}
1591 	return dma_handle;
1592 }
1593 
1594 dma_addr_t BCMFASTPATH
1595 osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
1596 	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
1597 {
1598 
1599 	sec_mem_elem_t *sec_mem_elem;
1600 	struct page *pa_cma_page;
1601 	void *pa_cma_kmap_va = NULL;
1602 	uint buflen = 0;
1603 	dma_addr_t dma_handle = 0x0;
1604 	uint loffset;
1605 
1606 	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
1607 	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
1608 
1609 	sec_mem_elem->va = va;
1610 	sec_mem_elem->direction = direction;
1611 	pa_cma_page = sec_mem_elem->pa_cma_page;
1612 
1613 	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
1614 	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
1615 	* pa_cma_kmap_va += loffset;
1616 	*/
1617 
1618 	pa_cma_kmap_va = sec_mem_elem->vac;
1619 	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
1620 	buflen = size;
1621 
1622 	if (direction == DMA_TX) {
1623 		memcpy(pa_cma_kmap_va, va, size); /* offset already applied above */
1624 
1625 		if (dmah) {
1626 			dmah->nsegs = 1;
1627 			dmah->origsize = buflen;
1628 		}
1629 	}
1630 	else
1631 	{
1632 		if ((p != NULL) && (dmah != NULL)) {
1633 			dmah->nsegs = 1;
1634 			dmah->origsize = buflen;
1635 		}
1636 		*(uint32 *)(pa_cma_kmap_va) = 0x0;
1637 	}
1638 
1639 	if (direction == DMA_RX) {
1640 		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
1641 	}
1642 	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
1643 		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1644 	if (dmah) {
1645 		dmah->segs[0].addr = dma_handle;
1646 		dmah->segs[0].length = buflen;
1647 	}
1648 	sec_mem_elem->dma_handle = dma_handle;
1649 	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
1650 	return dma_handle;
1651 }
1652 
1653 dma_addr_t BCMFASTPATH
1654 osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
1655 {
1656 
1657 	struct page *pa_cma_page;
1658 	phys_addr_t pa_cma;
1659 	dma_addr_t dma_handle = 0x0;
1660 	uint loffset;
1661 
1662 	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
1663 	pa_cma_page = phys_to_page(pa_cma);
1664 	loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
1665 
1666 	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
1667 		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1668 
1669 	return dma_handle;
1670 }
1671 
1672 void BCMFASTPATH
1673 osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
1674 void *p, hnddma_seg_map_t *map,	void *ptr_cma_info, uint offset)
1675 {
1676 	sec_mem_elem_t *sec_mem_elem;
1677 	void *pa_cma_kmap_va = NULL;
1678 	uint buflen = 0;
1679 	dma_addr_t pa_cma;
1680 	void *va;
1681 	int read_count = 0;
1682 	BCM_REFERENCE(buflen);
1683 	BCM_REFERENCE(read_count);
1684 
1685 	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
1686 	ASSERT(sec_mem_elem);
1687 
1688 	va = sec_mem_elem->va;
1689 	va = (uint8 *)va - offset;
1690 	pa_cma = sec_mem_elem->pa_cma;
1691 
1692 	if (direction == DMA_RX) {
1693 
1694 		if (p == NULL) {
1695 
1696 			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
1697 			* pa_cma_kmap_va += loffset;
1698 			*/
1699 
1700 			pa_cma_kmap_va = sec_mem_elem->vac;
1701 
1702 			do {
1703 				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
1704 
1705 				buflen = *(uint *)(pa_cma_kmap_va);
1706 				if (buflen)
1707 					break;
1708 
1709 				OSL_DELAY(1);
1710 				read_count++;
1711 			} while (read_count < 200);
1712 			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
1713 			memcpy(va, pa_cma_kmap_va, size);
1714 			/* kunmap_atomic(pa_cma_kmap_va); */
1715 		}
1716 	} else {
1717 		dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
1718 	}
1719 
1720 	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
1721 }
1722 
1723 void
1724 osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
1725 {
1726 
1727 	sec_mem_elem_t *sec_mem_elem;
1728 
1729 	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1730 
1731 	while (sec_mem_elem != NULL) {
1732 
1733 		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
1734 			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1735 		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
1736 
1737 		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1738 	}
1739 }
1740 
1741 static void
1742 osl_sec_dma_init_consistent(osl_t *osh)
1743 {
1744 	int i;
1745 	void *temp_va = osh->contig_base_alloc_coherent_va;
1746 	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
1747 
1748 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1749 		osh->sec_cma_coherent[i].avail = TRUE;
1750 		osh->sec_cma_coherent[i].va = temp_va;
1751 		osh->sec_cma_coherent[i].pa = temp_pa;
1752 		temp_va = ((uint8 *)temp_va)+SEC_CMA_COHERENT_BLK;
1753 		temp_pa += SEC_CMA_COHERENT_BLK;
1754 	}
1755 }
1756 
1757 static void *
1758 osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
1759 {
1760 
1761 	void *temp_va = NULL;
1762 	ulong temp_pa = 0;
1763 	int i;
1764 
1765 	if (size > SEC_CMA_COHERENT_BLK) {
1766 		printf("%s unsupported size\n", __FUNCTION__);
1767 		return NULL;
1768 	}
1769 
1770 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1771 		if (osh->sec_cma_coherent[i].avail == TRUE) {
1772 			temp_va = osh->sec_cma_coherent[i].va;
1773 			temp_pa = osh->sec_cma_coherent[i].pa;
1774 			osh->sec_cma_coherent[i].avail = FALSE;
1775 			break;
1776 		}
1777 	}
1778 
1779 	if (i == SEC_CMA_COHERENT_MAX)
1780 		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1781 			temp_va, (ulong)temp_pa, size);
1782 
1783 	*pap = (unsigned long)temp_pa;
1784 	return temp_va;
1785 }
1786 
1787 static void
1788 osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
1789 {
1790 	int i = 0;
1791 
1792 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1793 		if (osh->sec_cma_coherent[i].va == va) {
1794 			osh->sec_cma_coherent[i].avail = TRUE;
1795 			break;
1796 		}
1797 	}
1798 	if (i == SEC_CMA_COHERENT_MAX)
1799 		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1800 			va, (ulong)pa, size);
1801 }
1802 #endif /* BCM_SECURE_DMA */
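
/*
 * Note on the secure-DMA pool above: consistent allocations are served from a
 * fixed array of SEC_CMA_COHERENT_MAX blocks of SEC_CMA_COHERENT_BLK bytes,
 * so a single request larger than one block is rejected rather than split.
 */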
1803 
1804 /* timer apis */
1805 /* Note: all timer APIs are thread-unsafe and must be protected with locks by the caller */
1806 
1807 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
1808 void
1809 timer_cb_compat(struct timer_list *tl)
1810 {
1811 	timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
1812 	t->callback((ulong)t->arg);
1813 }
1814 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
1815 
1816 osl_timer_t *
1817 osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
1818 {
1819 	osl_timer_t *t;
1820 	BCM_REFERENCE(fn);
1821 	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
1822 		printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
1823 			(int)sizeof(osl_timer_t));
1824 		return (NULL);
1825 	}
1826 	bzero(t, sizeof(osl_timer_t));
1827 	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
1828 		printf("osl_timer_init: malloc failed\n");
1829 		MFREE(NULL, t, sizeof(osl_timer_t));
1830 		return (NULL);
1831 	}
1832 	t->set = TRUE;
1833 
1834 	init_timer_compat(t->timer, (linux_timer_fn)fn, arg);
1835 
1836 	return (t);
1837 }
1838 
1839 void
1840 osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1841 {
1842 	if (t == NULL) {
1843 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1844 		return;
1845 	}
1846 	ASSERT(!t->set);
1847 
1848 	t->set = TRUE;
1849 	if (periodic) {
1850 		printf("Periodic timers are not supported by Linux timer apis\n");
1851 	}
1852 	timer_expires(t->timer) = jiffies + ms*HZ/1000;
1853 
1854 	add_timer(t->timer);
1855 
1856 	return;
1857 }
1858 
1859 void
1860 osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1861 {
1862 	if (t == NULL) {
1863 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1864 		return;
1865 	}
1866 	if (periodic) {
1867 		printf("Periodic timers are not supported by Linux timer apis\n");
1868 	}
1869 	t->set = TRUE;
1870 	timer_expires(t->timer) = jiffies + ms*HZ/1000;
1871 
1872 	mod_timer(t->timer, timer_expires(t->timer));
1873 
1874 	return;
1875 }
1876 
1877 /*
1878  * Return TRUE if timer successfully deleted, FALSE if still pending
1879  */
1880 bool
1881 osl_timer_del(osl_t *osh, osl_timer_t *t)
1882 {
1883 	if (t == NULL) {
1884 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1885 		return (FALSE);
1886 	}
1887 	if (t->set) {
1888 		t->set = FALSE;
1889 		if (t->timer) {
1890 			del_timer(t->timer);
1891 			MFREE(NULL, t->timer, sizeof(struct timer_list));
1892 		}
1893 		MFREE(NULL, t, sizeof(osl_timer_t));
1894 	}
1895 	return (TRUE);
1896 }
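/*
 * Timer sketch (illustrative callback and names; periodic re-arm must be done
 * by the callback itself, as the printouts above note):
 *
 *	static void wd_cb(void *arg) { ... re-arm via osl_timer_update() ... }
 *
 *	osl_timer_t *t = osl_timer_init(osh, "wd", wd_cb, ctx);
 *	osl_timer_update(osh, t, 100, FALSE);	// (re)arm for ~100 ms
 *	...
 *	osl_timer_del(osh, t);			// deletes and frees the timer
 */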
1897 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1898 int
1899 kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
1900 {
1901 	return (int)kernel_read(file, addr, (size_t)count, &offset);
1902 }
1903 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
1904 
1905 void *
1906 osl_spin_lock_init(osl_t *osh)
1907 {
1908 	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
1909 	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
1910 	/* and this results in kernel asserts in internal builds */
1911 	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
1912 	if (lock)
1913 		spin_lock_init(lock);
1914 	return ((void *)lock);
1915 }
1916 
1917 void
1918 osl_spin_lock_deinit(osl_t *osh, void *lock)
1919 {
1920 	if (lock)
1921 		MFREE(osh, lock, sizeof(spinlock_t) + 4);
1922 }
1923 
1924 unsigned long
1925 osl_spin_lock(void *lock)
1926 {
1927 	unsigned long flags = 0;
1928 
1929 	if (lock)
1930 		spin_lock_irqsave((spinlock_t *)lock, flags);
1931 
1932 	return flags;
1933 }
1934 
1935 void
1936 osl_spin_unlock(void *lock, unsigned long flags)
1937 {
1938 	if (lock)
1939 		spin_unlock_irqrestore((spinlock_t *)lock, flags);
1940 }
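/*
 * Locking sketch (illustrative): the flags value returned by osl_spin_lock()
 * must be passed back to osl_spin_unlock(), since the lock is taken with
 * spin_lock_irqsave():
 *
 *	void *lk = osl_spin_lock_init(osh);
 *	unsigned long flags = osl_spin_lock(lk);
 *	... critical section ...
 *	osl_spin_unlock(lk, flags);
 *	osl_spin_lock_deinit(osh, lk);
 */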
1941 
1942 #ifdef USE_DMA_LOCK
1943 static void
1944 osl_dma_lock(osl_t *osh)
1945 {
1946 	if (likely(in_irq() || irqs_disabled())) {
1947 		spin_lock(&osh->dma_lock);
1948 	} else {
1949 		spin_lock_bh(&osh->dma_lock);
1950 		osh->dma_lock_bh = TRUE;
1951 	}
1952 }
1953 
1954 static void
1955 osl_dma_unlock(osl_t *osh)
1956 {
1957 	if (unlikely(osh->dma_lock_bh)) {
1958 		osh->dma_lock_bh = FALSE;
1959 		spin_unlock_bh(&osh->dma_lock);
1960 	} else {
1961 		spin_unlock(&osh->dma_lock);
1962 	}
1963 }
1964 
1965 static void
1966 osl_dma_lock_init(osl_t *osh)
1967 {
1968 	spin_lock_init(&osh->dma_lock);
1969 	osh->dma_lock_bh = FALSE;
1970 }
1971 #endif /* USE_DMA_LOCK */
1972