xref: /OK3568_Linux_fs/external/rkwifibt/drivers/infineon/linux_osl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Linux OS Independent Layer
3  *
4  * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: linux_osl.c 697654 2017-05-04 11:59:40Z $
30  */
31 
32 #define LINUX_PORT
33 
34 #include <typedefs.h>
35 #include <bcmendian.h>
36 #include <linuxver.h>
37 #include <bcmdefs.h>
38 
39 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
40 #include <asm/cacheflush.h>
41 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
42 
43 #include <linux/random.h>
44 
45 #include <osl.h>
46 #include <bcmutils.h>
47 #include <linux/delay.h>
48 #include <linux/vmalloc.h>
49 #include <pcicfg.h>
50 
51 #if defined(BCMASSERT_LOG) && !defined(OEM_ANDROID)
52 #include <bcm_assert_log.h>
53 #endif // endif
54 
55 #ifdef BCM_SECURE_DMA
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/io.h>
59 #include <linux/printk.h>
60 #include <linux/errno.h>
61 #include <linux/mm.h>
62 #include <linux/moduleparam.h>
63 #include <asm/io.h>
64 #include <linux/skbuff.h>
65 #include <stbutils.h>
66 #include <linux/highmem.h>
67 #include <linux/dma-mapping.h>
68 #include <asm/memory.h>
69 #endif /* BCM_SECURE_DMA */
70 
71 #include <linux/fs.h>
72 
73 #if defined(STB)
74 #include <linux/spinlock.h>
75 extern spinlock_t l2x0_reg_lock;
76 #endif // endif
77 
78 #ifdef BCM_OBJECT_TRACE
79 #include <bcmutils.h>
80 #endif /* BCM_OBJECT_TRACE */
81 #include "linux_osl_priv.h"
82 
83 #define PCI_CFG_RETRY		10
84 
85 #define DUMPBUFSZ 1024
86 
87 #ifdef BCM_SECURE_DMA
88 static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
89 	bool iscache, bool isdecr);
90 static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
91 static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
92 	sec_mem_elem_t **list);
93 static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
94 	void *sec_list_base);
95 static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
96 	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
97 static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
98 static void osl_sec_dma_init_consistent(osl_t *osh);
99 static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
100 	ulong *pap);
101 static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
102 #endif /* BCM_SECURE_DMA */
103 
104 /* PCMCIA attribute space access macros */
105 
/* Controls ASSERT() failure behavior: non-zero suppresses the kernel panic
 * (value is read by the assert handler elsewhere in the driver).
 */
#ifdef CUSTOMER_HW4_DEBUG
uint32 g_assert_type = 1; /* By Default not cause Kernel Panic */
#else
uint32 g_assert_type = 0; /* By Default Kernel Panic */
#endif /* CUSTOMER_HW4_DEBUG */

module_param(g_assert_type, int, 0);
#ifdef	BCM_SECURE_DMA
/* Where the secure-DMA buffer description came from (stored in
 * osh->stb_ext_params by osl_attach()).
 */
#define	SECDMA_MODULE_PARAMS	0
#define	SECDMA_EXT_FILE	1
/* Physical base/size of up to two secure-DMA regions, passed in as module
 * parameters; consumed once per osl_attach() in secdma_found order.
 */
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
/* Number of secure-DMA OSL instances attached so far (max 2 supported). */
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */
126 
#ifdef USE_DMA_LOCK
/* Serialize DMA map/unmap paths; defined near the end of this file. */
static void osl_dma_lock(osl_t *osh);
static void osl_dma_unlock(osl_t *osh);
static void osl_dma_lock_init(osl_t *osh);

#define DMA_LOCK(osh)		osl_dma_lock(osh)
#define DMA_UNLOCK(osh)		osl_dma_unlock(osh)
#define DMA_LOCK_INIT(osh)	osl_dma_lock_init(osh);
#else
/* Locking disabled: the macros compile away to nothing. */
#define DMA_LOCK(osh)		do { /* noop */ } while(0)
#define DMA_UNLOCK(osh)		do { /* noop */ } while(0)
#define DMA_LOCK_INIT(osh)	do { /* noop */ } while(0)
#endif /* USE_DMA_LOCK */
140 
/* Maps BCME_* error codes (bcmutils.h) to negative Linux errno values.
 * Indexed by -bcmerror in osl_error(); entry 0 is success. The table must
 * stay in sync with bcmutils.h — see the BCME_LAST compile-time check below.
 */
static int16 linuxbcmerrormap[] =
{	0,				/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL, 		/* BCME_NOCLK */
	-EINVAL, 		/* BCME_BADRATESET */
	-EINVAL, 		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY, 		/* BCME_BUSY */
	-EINVAL, 		/* BCME_NOTASSOCIATED */
	-EINVAL, 		/* BCME_BADSSIDLEN */
	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
	-EINVAL, 		/* BCME_BADCHAN */
	-EFAULT, 		/* BCME_BADADDR */
	-ENOMEM, 		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM, 		/* BCME_NOMEM */
	-EINVAL, 		/* BCME_ASSOCIATED */
	-ERANGE, 		/* BCME_RANGE */
	-EINVAL, 		/* BCME_NOTFOUND */
	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,     		/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,	/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */
	-EINVAL,        /* BCME_NOT_GC */
	-EINVAL,        /* BCME_PRS_REQ_FAILED */
	-EINVAL,        /* BCME_NO_P2P_SE */
	-EINVAL,        /* BCME_NOA_PND */
	-EINVAL,        /* BCME_FRAG_Q_FAILED */
	-EINVAL,        /* BCME_GET_AF_FAILED */
	-EINVAL,	/* BCME_MSCH_NOTREADY */
	-EINVAL,	/* BCME_IOV_LAST_CMD */
	-EINVAL,	/* BCME_MINIPMU_CAL_FAIL */
	-EINVAL,	/* BCME_RCAL_FAIL */
	-EINVAL,	/* BCME_LPF_RCCAL_FAIL */
	-EINVAL,	/* BCME_DACBUF_RCCAL_FAIL */
	-EINVAL,	/* BCME_VCOCAL_FAIL */
	-EINVAL,	/* BCME_BANDLOCKED */
	-EINVAL,	/* BCME_DNGL_DEVRESET */

/* When an new error code is added to bcmutils.h, add os
 * specific error translation here as well
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -68
#error "You need to add a OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif // endif
};
/* NOTE(review): appears to be a malloc-failure test knob read elsewhere in
 * the driver — confirm against callers before documenting further.
 */
uint lmtest = FALSE;
222 
223 #ifdef DHD_MAP_LOGGING
224 #define DHD_MAP_LOG_SIZE 2048
225 
/* One entry of the DMA map/unmap history ring (see osl_dma_map_logging()). */
typedef struct dhd_map_item {
	dmaaddr_t pa;		/* DMA address (physical) */
	uint64 ts_nsec;		/* timestamp: nsec */
	uint32 size;		/* mapping size */
	uint8 rsvd[4];		/* reserved for future use */
} dhd_map_item_t;
232 
/* Fixed-capacity ring of dhd_map_item_t records. 'idx' is the next slot to
 * fill and wraps modulo 'items' (see osl_dma_map_logging()).
 */
typedef struct dhd_map_record {
	uint32 items;		/* number of total items */
	uint32 idx;		/* current index of metadata */
	dhd_map_item_t map[0];	/* metadata storage */
} dhd_map_log_t;
238 
239 void
osl_dma_map_dump(osl_t * osh)240 osl_dma_map_dump(osl_t *osh)
241 {
242 	dhd_map_log_t *map_log, *unmap_log;
243 	uint64 ts_sec, ts_usec;
244 
245 	map_log = (dhd_map_log_t *)(osh->dhd_map_log);
246 	unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
247 	osl_get_localtime(&ts_sec, &ts_usec);
248 
249 	if (map_log && unmap_log) {
250 		printk("%s: map_idx=%d unmap_idx=%d "
251 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
252 			map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
253 			(unsigned long)ts_usec);
254 		printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
255 			" dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
256 			(uint64)__virt_to_phys((ulong)(map_log->map)),
257 			(uint32)(sizeof(dhd_map_item_t) * map_log->items),
258 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
259 			(uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
260 	}
261 }
262 
263 static void *
osl_dma_map_log_init(uint32 item_len)264 osl_dma_map_log_init(uint32 item_len)
265 {
266 	dhd_map_log_t *map_log;
267 	gfp_t flags;
268 	uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
269 		(item_len * sizeof(dhd_map_item_t)));
270 
271 	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
272 	map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
273 	if (map_log) {
274 		memset(map_log, 0, alloc_size);
275 		map_log->items = item_len;
276 		map_log->idx = 0;
277 	}
278 
279 	return (void *)map_log;
280 }
281 
282 static void
osl_dma_map_log_deinit(osl_t * osh)283 osl_dma_map_log_deinit(osl_t *osh)
284 {
285 	if (osh->dhd_map_log) {
286 		kfree(osh->dhd_map_log);
287 		osh->dhd_map_log = NULL;
288 	}
289 
290 	if (osh->dhd_unmap_log) {
291 		kfree(osh->dhd_unmap_log);
292 		osh->dhd_unmap_log = NULL;
293 	}
294 }
295 
296 static void
osl_dma_map_logging(osl_t * osh,void * handle,dmaaddr_t pa,uint32 len)297 osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
298 {
299 	dhd_map_log_t *log = (dhd_map_log_t *)handle;
300 	uint32 idx;
301 
302 	if (log == NULL) {
303 		printk("%s: log is NULL\n", __FUNCTION__);
304 		return;
305 	}
306 
307 	idx = log->idx;
308 	log->map[idx].ts_nsec = osl_localtime_ns();
309 	log->map[idx].pa = pa;
310 	log->map[idx].size = len;
311 	log->idx = (idx + 1) % log->items;
312 }
313 #endif /* DHD_MAP_LOGGING */
314 
315 /* translate bcmerrors into linux errors */
316 int
osl_error(int bcmerror)317 osl_error(int bcmerror)
318 {
319 	if (bcmerror > 0)
320 		bcmerror = 0;
321 	else if (bcmerror < BCME_LAST)
322 		bcmerror = BCME_ERROR;
323 
324 	/* Array bounds covered by ASSERT in osl_attach */
325 	return linuxbcmerrormap[-bcmerror];
326 }
327 osl_t *
osl_attach(void * pdev,uint bustype,bool pkttag)328 osl_attach(void *pdev, uint bustype, bool pkttag)
329 {
330 	void **osl_cmn = NULL;
331 	osl_t *osh;
332 	gfp_t flags;
333 #ifdef BCM_SECURE_DMA
334 	u32 secdma_memsize;
335 #endif // endif
336 
337 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
338 	if (!(osh = kmalloc(sizeof(osl_t), flags)))
339 		return osh;
340 
341 	ASSERT(osh);
342 
343 	bzero(osh, sizeof(osl_t));
344 
345 	if (osl_cmn == NULL || *osl_cmn == NULL) {
346 		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
347 			kfree(osh);
348 			return NULL;
349 		}
350 		bzero(osh->cmn, sizeof(osl_cmn_t));
351 		if (osl_cmn)
352 			*osl_cmn = osh->cmn;
353 		atomic_set(&osh->cmn->malloced, 0);
354 		osh->cmn->dbgmem_list = NULL;
355 		spin_lock_init(&(osh->cmn->dbgmem_lock));
356 
357 		spin_lock_init(&(osh->cmn->pktalloc_lock));
358 
359 	} else {
360 		osh->cmn = *osl_cmn;
361 	}
362 	atomic_add(1, &osh->cmn->refcount);
363 
364 	bcm_object_trace_init();
365 
366 	/* Check that error map has the right number of entries in it */
367 	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
368 
369 	osh->failed = 0;
370 	osh->pdev = pdev;
371 	osh->pub.pkttag = pkttag;
372 	osh->bustype = bustype;
373 	osh->magic = OS_HANDLE_MAGIC;
374 #ifdef BCM_SECURE_DMA
375 
376 	if ((secdma_addr != 0) && (secdma_size != 0)) {
377 		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
378 		if (secdma_found == 0) {
379 			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
380 			secdma_memsize = secdma_size;
381 		} else if (secdma_found == 1) {
382 			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
383 			secdma_memsize = secdma_size2;
384 		} else {
385 			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
386 			kfree(osh);
387 			return NULL;
388 		}
389 		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
390 		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
391 		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
392 			(unsigned int)osh->contig_base_alloc);
393 		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
394 	}
395 	else if (stbpriv_init(osh) == 0) {
396 		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
397 		if (secdma_found == 0) {
398 			osh->contig_base_alloc =
399 				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
400 			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
401 		} else if (secdma_found == 1) {
402 			osh->contig_base_alloc =
403 				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
404 			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
405 		} else {
406 			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
407 			kfree(osh);
408 			return NULL;
409 		}
410 		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
411 		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
412 		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
413 			(unsigned int)osh->contig_base_alloc);
414 		osh->stb_ext_params = SECDMA_EXT_FILE;
415 	}
416 	else {
417 		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
418 		kfree(osh);
419 		return NULL;
420 	}
421 	secdma_found++;
422 	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
423 		phys_to_page((u32)osh->contig_base_alloc),
424 		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);
425 
426 	if (osh->contig_base_alloc_coherent_va == NULL) {
427 		if (osh->cmn)
428 			kfree(osh->cmn);
429 	    kfree(osh);
430 	    return NULL;
431 	}
432 	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
433 	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
434 	osl_sec_dma_init_consistent(osh);
435 
436 	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;
437 
438 	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
439 		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
440 	if (osh->contig_base_alloc_va == NULL) {
441 		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
442 		if (osh->cmn)
443 			kfree(osh->cmn);
444 		kfree(osh);
445 		return NULL;
446 	}
447 	osh->contig_base_va = osh->contig_base_alloc_va;
448 
449 	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
450 		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
451 	    osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
452 	    osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
453 		if (osh->cmn)
454 			kfree(osh->cmn);
455 		kfree(osh);
456 		return NULL;
457 	}
458 	osh->sec_list_base_4096 = osh->sec_list_4096;
459 
460 #endif /* BCM_SECURE_DMA */
461 
462 	switch (bustype) {
463 		case PCI_BUS:
464 		case SI_BUS:
465 		case PCMCIA_BUS:
466 			osh->pub.mmbus = TRUE;
467 			break;
468 		case JTAG_BUS:
469 		case SDIO_BUS:
470 		case USB_BUS:
471 		case SPI_BUS:
472 		case RPC_BUS:
473 			osh->pub.mmbus = FALSE;
474 			break;
475 		default:
476 			ASSERT(FALSE);
477 			break;
478 	}
479 
480 	DMA_LOCK_INIT(osh);
481 
482 #ifdef DHD_MAP_LOGGING
483 	osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
484 	if (osh->dhd_map_log == NULL) {
485 		printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
486 	}
487 
488 	osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
489 	if (osh->dhd_unmap_log == NULL) {
490 		printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
491 	}
492 #endif /* DHD_MAP_LOGGING */
493 
494 	return osh;
495 }
496 
/* Stash the bus-layer handle for later retrieval via osl_get_bus_handle(). */
void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}
501 
/* Return the bus-layer handle stored by osl_set_bus_handle(). */
void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}
506 
507 #if defined(BCM_BACKPLANE_TIMEOUT)
/* Register the backplane-timeout callback and its context (sih).
 * Silently ignores a NULL handle.
 */
void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
{
	if (osh == NULL)
		return;

	osh->bpt_cb = (bpt_cb_fn)bpt_cb;
	osh->sih = bpt_ctx;
}
515 #endif	/* BCM_BACKPLANE_TIMEOUT */
516 
517 void
osl_detach(osl_t * osh)518 osl_detach(osl_t *osh)
519 {
520 	if (osh == NULL)
521 		return;
522 
523 #ifdef BCM_SECURE_DMA
524 	if (osh->stb_ext_params == SECDMA_EXT_FILE)
525 		stbpriv_exit(osh);
526 	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
527 	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
528 	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
529 	secdma_found--;
530 #endif /* BCM_SECURE_DMA */
531 
532 	bcm_object_trace_deinit();
533 
534 #ifdef DHD_MAP_LOGGING
535 	osl_dma_map_log_deinit(osh->dhd_map_log);
536 	osl_dma_map_log_deinit(osh->dhd_unmap_log);
537 #endif /* DHD_MAP_LOGGING */
538 
539 	ASSERT(osh->magic == OS_HANDLE_MAGIC);
540 	atomic_sub(1, &osh->cmn->refcount);
541 	if (atomic_read(&osh->cmn->refcount) == 0) {
542 			kfree(osh->cmn);
543 	}
544 	kfree(osh);
545 }
546 
547 /* APIs to set/get specific quirks in OSL layer */
/* OR the given bits into the handle's quirk flags (see osl_is_flag_set()). */
void BCMFASTPATH
osl_flag_set(osl_t *osh, uint32 mask)
{
	osh->flags |= mask;
}
553 
/* Clear the given bits from the handle's quirk flags. */
void
osl_flag_clr(osl_t *osh, uint32 mask)
{
	osh->flags &= ~mask;
}
559 
/* Test quirk flag bits set via osl_flag_set(); returns nonzero if any bit
 * in 'mask' is set. Inlined on STB builds where this sits on a fast path.
 */
#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif // endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	return (osh->flags & mask);
}
569 
570 #if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
571 	defined(STB_SOC_WIFI)
572 
/* Report whether CPU/DMA are cache-coherent on this platform: always no
 * here, so callers must use the explicit flush/invalidate helpers below.
 */
inline int BCMFASTPATH
osl_arch_is_coherent(void)
{
	return 0;
}
578 
/* ACP (Accelerator Coherency Port) workaround: disabled on this platform. */
inline int BCMFASTPATH
osl_acp_war_enab(void)
{
	return 0;
}
584 
/* Clean (write back) the CPU cache for [va, va+size) before the device
 * reads the buffer. No-op when size is 0.
 * NOTE(review): passes OSH_NULL as the device argument — assumes the arch
 * DMA backend tolerates a NULL device; confirm for this platform.
 */
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{

	if (size > 0)
#ifdef STB_SOC_WIFI
		dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
#else /* STB_SOC_WIFI */
		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
			DMA_TO_DEVICE);
#endif /* STB_SOC_WIFI */
}
597 
/* Invalidate the CPU cache for [va, va+size) after the device has written
 * the buffer. Unlike osl_cache_flush(), no size > 0 guard is applied —
 * presumably harmless for size 0; confirm if callers can pass it.
 */
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{

#ifdef STB_SOC_WIFI
	dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
#else /* STB_SOC_WIFI */
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
#endif /* STB_SOC_WIFI */
}
608 
/* Hint the CPU to prefetch the cache line at 'ptr' (ARM PLD instruction);
 * compiled out on STB_SOC_WIFI builds.
 */
inline void BCMFASTPATH
osl_prefetch(const void *ptr)
{
#if !defined(STB_SOC_WIFI)
	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
#endif // endif
}
616 
617 #endif // endif
618 
619 uint32
osl_pci_read_config(osl_t * osh,uint offset,uint size)620 osl_pci_read_config(osl_t *osh, uint offset, uint size)
621 {
622 	uint val = 0;
623 	uint retry = PCI_CFG_RETRY;
624 
625 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
626 
627 	/* only 4byte access supported */
628 	ASSERT(size == 4);
629 
630 	do {
631 		pci_read_config_dword(osh->pdev, offset, &val);
632 		if (val != 0xffffffff)
633 			break;
634 	} while (retry--);
635 
636 	return (val);
637 }
638 
639 void
osl_pci_write_config(osl_t * osh,uint offset,uint size,uint val)640 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
641 {
642 	uint retry = PCI_CFG_RETRY;
643 
644 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
645 
646 	/* only 4byte access supported */
647 	ASSERT(size == 4);
648 
649 	do {
650 		pci_write_config_dword(osh->pdev, offset, val);
651 		if (offset != PCI_BAR0_WIN)
652 			break;
653 		if (osl_pci_read_config(osh, offset, size) == val)
654 			break;
655 	} while (retry--);
656 
657 }
658 
659 /* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	/* On ARMv7-A platforms the PCI domain number is reported instead of
	 * the bus number — presumably a platform quirk; confirm against the
	 * callers that consume this value.
	 */
#if defined(__ARM_ARCH_7A__)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif // endif
}
671 
672 /* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	/* ARMv7-A builds report the slot number offset by one — platform
	 * numbering quirk; verify against the consumers of this value.
	 */
#if defined(__ARM_ARCH_7A__)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif // endif
}
684 
685 /* return domain # for the pci device pointed by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}
693 
694 /* return bus # for the pci device pointed by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}
702 
703 /* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}
711 
/* Intentionally empty: PCMCIA attribute-space access is not implemented in
 * this OSL; read/write wrappers below are kept for interface compatibility.
 */
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}
716 
/* Read PCMCIA attribute space — currently a no-op (see osl_pcmcia_attr()). */
void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}
722 
/* Write PCMCIA attribute space — currently a no-op (see osl_pcmcia_attr()). */
void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}
728 
/* Allocate 'size' bytes, accounting the allocation against osh->cmn->malloced
 * (when osh is non-NULL). On CONFIG_DHD_USE_STATIC_BUF builds, requests in
 * [PAGE_SIZE, STATIC_BUF_SIZE] are served zeroed from a preallocated static
 * pool when a slot is free; all other requests (and pool exhaustion) fall
 * back to kmalloc. kmalloc'd memory is NOT zeroed — see osl_mallocz().
 * Returns NULL on failure and bumps osh->failed.
 */
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		unsigned long irq_flags;
		int i = 0;
		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
		{
			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

			/* find the first free slot in the static pool */
			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

			/* static-pool buffers are handed out zeroed */
			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
			if (osh)
				atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}
784 
785 void *
osl_mallocz(osl_t * osh,uint size)786 osl_mallocz(osl_t *osh, uint size)
787 {
788 	void *ptr;
789 
790 	ptr = osl_malloc(osh, size);
791 
792 	if (ptr != NULL) {
793 		bzero(ptr, size);
794 	}
795 
796 	return ptr;
797 }
798 
/* Free memory obtained from osl_malloc()/osl_mallocz() and decrement the
 * malloced accounting. On CONFIG_DHD_USE_STATIC_BUF builds, addresses that
 * fall inside the static pool are returned to the pool instead of kfree'd.
 */
void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	unsigned long flags;

	if (bcm_static_buf)
	{
		/* address range check: does 'addr' live inside the static pool? */
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			/* recover the slot index from the offset into the pool */
			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
			bcm_static_buf->buf_use[buf_idx] = 0;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

			if (osh && osh->cmn) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->cmn->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	kfree(addr);
}
835 
836 void *
osl_vmalloc(osl_t * osh,uint size)837 osl_vmalloc(osl_t *osh, uint size)
838 {
839 	void *addr;
840 
841 	/* only ASSERT if osh is defined */
842 	if (osh)
843 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
844 	if ((addr = vmalloc(size)) == NULL) {
845 		if (osh)
846 			osh->failed++;
847 		return (NULL);
848 	}
849 	if (osh && osh->cmn)
850 		atomic_add(size, &osh->cmn->malloced);
851 
852 	return (addr);
853 }
854 
855 void *
osl_vmallocz(osl_t * osh,uint size)856 osl_vmallocz(osl_t *osh, uint size)
857 {
858 	void *ptr;
859 
860 	ptr = osl_vmalloc(osh, size);
861 
862 	if (ptr != NULL) {
863 		bzero(ptr, size);
864 	}
865 
866 	return ptr;
867 }
868 
869 void
osl_vmfree(osl_t * osh,void * addr,uint size)870 osl_vmfree(osl_t *osh, void *addr, uint size)
871 {
872 	if (osh && osh->cmn) {
873 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
874 
875 		ASSERT(size <= osl_malloced(osh));
876 
877 		atomic_sub(size, &osh->cmn->malloced);
878 	}
879 	vfree(addr);
880 }
881 
882 uint
osl_check_memleak(osl_t * osh)883 osl_check_memleak(osl_t *osh)
884 {
885 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
886 	if (atomic_read(&osh->cmn->refcount) == 1)
887 		return (atomic_read(&osh->cmn->malloced));
888 	else
889 		return 0;
890 }
891 
/* Current total of bytes allocated through this OSL's accounting. */
uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));
}
898 
/* Number of allocation failures seen by osl_malloc()/osl_vmalloc(). */
uint
osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}
905 
/* Alignment guarantee of DMA-consistent allocations: one page. */
uint
osl_dma_consistent_align(void)
{
	return (PAGE_SIZE);
}
911 
/* Allocate DMA-consistent memory. '*alloced' receives the (possibly padded)
 * size to pass back to osl_dma_free_consistent(); '*pap' receives the bus
 * address. Backend depends on build: plain kmalloc on non-coherent ARMv7-A
 * and STB SoC builds, dma_alloc_coherent otherwise, or the secure-DMA pool.
 */
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* Pad the request so the caller can align within the block.
	 * NOTE(review): tests alignment of DMA_CONSISTENT_ALIGN against
	 * 'align', not of the returned address — confirm intent against the
	 * other OSL ports before changing.
	 */
	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifndef	BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	{
		dma_addr_t pap_lin;
		struct pci_dev *hwdev = osh->pdev;
		gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
		flags = GFP_ATOMIC;
#else
		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
		/* split the 64-bit bus address into the dmaaddr_t halves */
		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
		*pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
	}
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
	return va;
}
953 
/* Free memory obtained from osl_dma_alloc_consistent(); 'size' and 'pa'
 * must be the values that call returned. The release path mirrors the
 * build-dependent allocation path (kfree / pci_free_consistent / secure
 * DMA pool).
 */
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	kfree(va);
#else
#ifdef BCMDMA64OSL
	/* recombine the dmaaddr_t halves into a linear dma_addr_t */
	PHYSADDRTOULONG(pa, paddr);
	pci_free_consistent(osh->pdev, size, va, paddr);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}
978 
/* Translate a kernel virtual address to its physical address. */
void *
osl_virt_to_phys(void *va)
{
	return (void *)(uintptr)virt_to_phys(va);
}
984 
985 #include <asm/cacheflush.h>
/* Intentional no-op: kept for interface compatibility. Cache maintenance
 * for DMA buffers is presumably handled by the map/unmap and consistent
 * allocation paths — confirm before relying on this for streaming buffers.
 */
void BCMFASTPATH
osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	return;
}
991 
/* Map a virtual buffer for streaming DMA and return its bus address.
 * direction DMA_TX maps to-device, anything else from-device. On a mapping
 * failure both halves of the returned dmaaddr_t are zero. The mapping (or
 * the zeroed failure record) is appended to the map history ring when
 * DHD_MAP_LOGGING is enabled.
 */
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;
	dmaaddr_t ret_addr;
	dma_addr_t map_addr;
	int ret;

	DMA_LOCK(osh);

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	/* need to flush or invalidate the cache here */
	if (dir == DMA_TX) { /* to device */
		osl_cache_flush(va, size);
	} else if (dir == DMA_RX) { /* from device */
		osl_cache_inv(va, size);
	} else { /* both */
		osl_cache_flush(va, size);
		osl_cache_inv(va, size);
	}
	/* ARMv8 STB path returns early: physical == bus address here */
	DMA_UNLOCK(osh);
	return virt_to_phys(va);
#else /* (__LINUX_ARM_ARCH__ == 8) */
	map_addr = dma_map_single(osh->pdev, va, size, dir);
	DMA_UNLOCK(osh);
	return map_addr;
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* ! STB_SOC_WIFI */
	map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif	/* ! STB_SOC_WIFI */

	ret = pci_dma_mapping_error(osh->pdev, map_addr);

	if (ret) {
		printk("%s: Failed to map memory\n", __FUNCTION__);
		PHYSADDRLOSET(ret_addr, 0);
		PHYSADDRHISET(ret_addr, 0);
	} else {
		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
	}

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
#endif /* DHD_MAP_LOGGING */

	DMA_UNLOCK(osh);

	return ret_addr;
}
1046 
/*
 * Undo a mapping created by osl_dma_map(). The address/size/direction must
 * match the original map call. Serialized with the per-osh DMA lock.
 */
void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
	int dir;
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	DMA_LOCK(osh);

	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
#endif /* DHD_MAP_LOGGING */

#ifdef BCMDMA64OSL
	/* Reassemble the lo/hi dmaaddr_t halves into a dma_addr_t. */
	PHYSADDRTOULONG(pa, paddr);
	pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	/* NOTE(review): 'dir' holds PCI_DMA_* values but is compared against
	 * DMA_TX/DMA_RX; relies on the constants coinciding — confirm.
	 */
	if (dir == DMA_TX) { /* to device */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
	} else if (dir == DMA_RX) { /* from device */
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	} else { /* both */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	}
#else /* (__LINUX_ARM_ARCH__ == 8) */
	dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */

#endif /* BCMDMA64OSL */

	DMA_UNLOCK(osh);
}
1091 
/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
	/* Busy-wait hint to the CPU; used inside polling loops. */
	cpu_relax();
}
1098 
/* Disable kernel preemption on the current CPU (OSL wrapper; osh unused).
 * The redundant 'extern' storage class on the definition was dropped.
 */
void
osl_preempt_disable(osl_t *osh)
{
	preempt_disable();
}
1103 
/* Re-enable kernel preemption (OSL wrapper; osh unused). Pairs with
 * osl_preempt_disable(). The redundant 'extern' on the definition was dropped.
 */
void
osl_preempt_enable(osl_t *osh)
{
	preempt_enable();
}
1108 
#if defined(BCMASSERT_LOG)
/*
 * Report a failed ASSERT(). Formats "<expr>: file <basename>, line <n>" and,
 * depending on g_assert_type, panics (0), BUGs (2), or just logs (1/3/other).
 */
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	/* Strip the directory part of the path for a compact message. */
	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	/* Use the full buffer; the previous hard-coded 64 truncated messages. */
	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
#ifndef OEM_ANDROID
	bcm_assert_log(tempbuf);
#endif /* OEM_ANDROID */
#endif /* BCMASSERT_LOG */

	switch (g_assert_type) {
	case 0:
		panic("%s", tempbuf);
		break;
	case 1:
		/* fall through */
	case 3:
		printk("%s", tempbuf);
		break;
	case 2:
		printk("%s", tempbuf);
		BUG();
		break;
	default:
		break;
	}
}
#endif // endif
1150 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 1))
do_gettimeofday(struct timeval * tv)1151 void do_gettimeofday(struct timeval *tv)
1152 {
1153 	struct timespec64 ts;
1154 	ktime_get_real_ts64(&ts);
1155 	tv->tv_sec = ts.tv_sec;
1156 	tv->tv_usec = ts.tv_nsec;
1157 }
1158 #endif  /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 1) */
1159 void
osl_delay(uint usec)1160 osl_delay(uint usec)
1161 {
1162 	uint d;
1163 
1164 	while (usec > 0) {
1165 		d = MIN(usec, 1000);
1166 		udelay(d);
1167 		usec -= d;
1168 	}
1169 }
1170 
1171 void
osl_sleep(uint ms)1172 osl_sleep(uint ms)
1173 {
1174 	if (ms < 20)
1175 		usleep_range(ms*1000, ms*1000 + 1000);
1176 	else
1177 		msleep(ms);
1178 }
1179 
1180 uint64
osl_sysuptime_us(void)1181 osl_sysuptime_us(void)
1182 {
1183 	struct timeval tv;
1184 	uint64 usec;
1185 
1186 	do_gettimeofday(&tv);
1187 	/* tv_usec content is fraction of a second */
1188 	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
1189 	return usec;
1190 }
1191 
1192 uint64
osl_localtime_ns(void)1193 osl_localtime_ns(void)
1194 {
1195 	uint64 ts_nsec = 0;
1196 
1197 	ts_nsec = local_clock();
1198 
1199 	return ts_nsec;
1200 }
1201 
1202 void
osl_get_localtime(uint64 * sec,uint64 * usec)1203 osl_get_localtime(uint64 *sec, uint64 *usec)
1204 {
1205 	uint64 ts_nsec = 0;
1206 	unsigned long rem_nsec = 0;
1207 
1208 	ts_nsec = local_clock();
1209 	rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
1210 	*sec = (uint64)ts_nsec;
1211 	*usec = (uint64)(rem_nsec / MSEC_PER_SEC);
1212 }
1213 
1214 uint64
osl_systztime_us(void)1215 osl_systztime_us(void)
1216 {
1217 	struct timeval tv;
1218 	uint64 tzusec;
1219 
1220 	do_gettimeofday(&tv);
1221 	/* apply timezone */
1222 	tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
1223 		USEC_PER_SEC);
1224 	tzusec += tv.tv_usec;
1225 
1226 	return tzusec;
1227 }
1228 
1229 /*
1230  * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1231  */
1232 
1233 /*
1234  * BINOSL selects the slightly slower function-call-based binary compatible osl.
1235  */
1236 
1237 uint32
osl_rand(void)1238 osl_rand(void)
1239 {
1240 	uint32 rand;
1241 
1242 	get_random_bytes(&rand, sizeof(rand));
1243 
1244 	return rand;
1245 }
1246 
1247 /* Linux Kernel: File Operations: start */
1248 void *
osl_os_open_image(char * filename)1249 osl_os_open_image(char *filename)
1250 {
1251 	struct file *fp;
1252 
1253 	fp = filp_open(filename, O_RDONLY, 0);
1254 	/*
1255 	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1256 	 * Alternative:
1257 	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1258 	 * ???
1259 	 */
1260 	 if (IS_ERR(fp))
1261 		 fp = NULL;
1262 
1263 	 return fp;
1264 }
1265 
1266 int
osl_os_get_image_block(char * buf,int len,void * image)1267 osl_os_get_image_block(char *buf, int len, void *image)
1268 {
1269 	struct file *fp = (struct file *)image;
1270 	int rdlen;
1271 
1272 	if (!image)
1273 		return 0;
1274 
1275 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1276 	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
1277 #else
1278 	rdlen = kernel_read(fp, fp->f_pos, buf, len);
1279 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
1280 
1281 	if (rdlen > 0)
1282 		fp->f_pos += rdlen;
1283 
1284 	return rdlen;
1285 }
1286 
1287 void
osl_os_close_image(void * image)1288 osl_os_close_image(void *image)
1289 {
1290 	if (image)
1291 		filp_close((struct file *)image, NULL);
1292 }
1293 
1294 int
osl_os_image_size(void * image)1295 osl_os_image_size(void *image)
1296 {
1297 	int len = 0, curroffset;
1298 
1299 	if (image) {
1300 		/* store the current offset */
1301 		curroffset = generic_file_llseek(image, 0, 1);
1302 		/* goto end of file to get length */
1303 		len = generic_file_llseek(image, 0, 2);
1304 		/* restore back the offset */
1305 		generic_file_llseek(image, curroffset, 0);
1306 	}
1307 	return len;
1308 }
1309 
1310 /* Linux Kernel: File Operations: end */
1311 
#if (defined(STB) && defined(__arm__))
/*
 * Sized register read (1/2/4/8 bytes) from a mapped PCIe address.
 * On PCI-bus handles with the ACP workaround enabled, the read is done
 * under l2x0_reg_lock to serialize against L2-controller register access.
 * Note: sizes other than 1/2/4/8 leave *v untouched.
 */
inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	unsigned long flags = 0;
	int pci_access = 0;
	int acp_war_enab = ACP_WAR_ENAB();

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
		pci_access = 1;

	if (pci_access && acp_war_enab)
		spin_lock_irqsave(&l2x0_reg_lock, flags);

	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		break;
	case sizeof(uint64):
		/* no 64-bit read accessor used here; plain volatile load */
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		break;
	}

	if (pci_access && acp_war_enab)
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif // endif
1344 
#if defined(BCM_BACKPLANE_TIMEOUT)
/*
 * Sized register read that detects backplane timeouts: an all-ones value is
 * treated as a possible dead/unclocked core, triggering the registered
 * bpt_cb recovery callback. The static in_si_clear flag prevents re-entry
 * when the callback itself performs register reads.
 * NOTE(review): the static flag makes this non-reentrant across CPUs —
 * presumably reads are already serialized by the caller; confirm.
 */
inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	bool poll_timeout = FALSE;
	static int in_si_clear = FALSE;

	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		if (*(volatile uint8*)v == 0xff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		if (*(volatile uint16*)v == 0xffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		if (*(volatile uint32*)v == 0xffffffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint64):
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		if (*(volatile uint64*)v == 0xffffffffffffffff)
			poll_timeout = TRUE;
		break;
	}

	/* Invoke the timeout handler once, guarding against recursion. */
	if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
		in_si_clear = TRUE;
		osh->bpt_cb((void *)osh->sih, (void *)addr);
		in_si_clear = FALSE;
	}
}
#endif /* BCM_BACKPLANE_TIMEOUT */
1381 
1382 #ifdef BCM_SECURE_DMA
1383 static void *
osl_sec_dma_ioremap(osl_t * osh,struct page * page,size_t size,bool iscache,bool isdecr)1384 osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
1385 {
1386 
1387 	struct page **map;
1388 	int order, i;
1389 	void *addr = NULL;
1390 
1391 	size = PAGE_ALIGN(size);
1392 	order = get_order(size);
1393 
1394 	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
1395 
1396 	if (map == NULL)
1397 		return NULL;
1398 
1399 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
1400 		map[i] = page + i;
1401 
1402 	if (iscache) {
1403 		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
1404 		if (isdecr) {
1405 			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
1406 		}
1407 	} else {
1408 
1409 #if defined(__ARM_ARCH_7A__)
1410 		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
1411 			pgprot_noncached(__pgprot(PAGE_KERNEL)));
1412 #endif // endif
1413 		if (isdecr) {
1414 			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
1415 		}
1416 	}
1417 
1418 	kfree(map);
1419 	return (void *)addr;
1420 }
1421 
static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	/* Tear down a mapping made by osl_sec_dma_ioremap(); osh and size
	 * are unused because vunmap() only needs the base VA.
	 */
	vunmap(contig_base_va);
}
1427 
1428 static int
osl_sec_dma_init_elem_mem_block(osl_t * osh,size_t mbsize,int max,sec_mem_elem_t ** list)1429 osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
1430 {
1431 	int i;
1432 	int ret = BCME_OK;
1433 	sec_mem_elem_t *sec_mem_elem;
1434 
1435 	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
1436 
1437 		*list = sec_mem_elem;
1438 		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
1439 		for (i = 0; i < max-1; i++) {
1440 			sec_mem_elem->next = (sec_mem_elem + 1);
1441 			sec_mem_elem->size = mbsize;
1442 			sec_mem_elem->pa_cma = osh->contig_base_alloc;
1443 			sec_mem_elem->vac = osh->contig_base_alloc_va;
1444 
1445 			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1446 			osh->contig_base_alloc += mbsize;
1447 			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
1448 
1449 			sec_mem_elem = sec_mem_elem + 1;
1450 		}
1451 		sec_mem_elem->next = NULL;
1452 		sec_mem_elem->size = mbsize;
1453 		sec_mem_elem->pa_cma = osh->contig_base_alloc;
1454 		sec_mem_elem->vac = osh->contig_base_alloc_va;
1455 
1456 		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1457 		osh->contig_base_alloc += mbsize;
1458 		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
1459 
1460 	} else {
1461 		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
1462 		ret = BCME_ERROR;
1463 	}
1464 	return ret;
1465 }
1466 
1467 static void
osl_sec_dma_deinit_elem_mem_block(osl_t * osh,size_t mbsize,int max,void * sec_list_base)1468 osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
1469 {
1470 	if (sec_list_base)
1471 		kfree(sec_list_base);
1472 }
1473 
1474 static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t * osh,void * va,uint size,int direction,struct sec_cma_info * ptr_cma_info,uint offset)1475 osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
1476 	struct sec_cma_info *ptr_cma_info, uint offset)
1477 {
1478 	sec_mem_elem_t *sec_mem_elem = NULL;
1479 
1480 		ASSERT(osh->sec_list_4096);
1481 		sec_mem_elem = osh->sec_list_4096;
1482 		osh->sec_list_4096 = sec_mem_elem->next;
1483 
1484 		sec_mem_elem->next = NULL;
1485 
1486 	if (ptr_cma_info->sec_alloc_list_tail) {
1487 		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
1488 		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
1489 	}
1490 	else {
1491 		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
1492 		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
1493 		ptr_cma_info->sec_alloc_list = sec_mem_elem;
1494 		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
1495 	}
1496 	return sec_mem_elem;
1497 }
1498 
1499 static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t * osh,sec_mem_elem_t * sec_mem_elem)1500 osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
1501 {
1502 	sec_mem_elem->dma_handle = 0x0;
1503 	sec_mem_elem->va = NULL;
1504 		sec_mem_elem->next = osh->sec_list_4096;
1505 		osh->sec_list_4096 = sec_mem_elem;
1506 }
1507 
1508 static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t * osh,struct sec_cma_info * ptr_cma_info,dma_addr_t dma_handle)1509 osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
1510 {
1511 	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1512 	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
1513 
1514 	if (sec_mem_elem->dma_handle == dma_handle) {
1515 
1516 		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1517 
1518 		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
1519 			ptr_cma_info->sec_alloc_list_tail = NULL;
1520 			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
1521 		}
1522 
1523 		return sec_mem_elem;
1524 	}
1525 	sec_mem_elem = sec_mem_elem->next;
1526 
1527 	while (sec_mem_elem != NULL) {
1528 
1529 		if (sec_mem_elem->dma_handle == dma_handle) {
1530 
1531 			sec_prv_elem->next = sec_mem_elem->next;
1532 			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
1533 				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
1534 
1535 			return sec_mem_elem;
1536 		}
1537 		sec_prv_elem = sec_mem_elem;
1538 		sec_mem_elem = sec_mem_elem->next;
1539 	}
1540 	return NULL;
1541 }
1542 
1543 static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t * osh,struct sec_cma_info * ptr_cma_info)1544 osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
1545 {
1546 	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1547 
1548 	if (sec_mem_elem) {
1549 
1550 		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1551 
1552 		if (ptr_cma_info->sec_alloc_list == NULL)
1553 			ptr_cma_info->sec_alloc_list_tail = NULL;
1554 
1555 		return sec_mem_elem;
1556 
1557 	} else
1558 		return NULL;
1559 }
1560 
static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	/* Most recently appended element (NULL when the list is empty). */
	return ptr_cma_info->sec_alloc_list_tail;
}
1566 
/*
 * DMA-map TX metadata that shares the CMA buffer of the most recently
 * mapped packet. Returns the bus address, or 0 if the element isn't found.
 */
dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	uint loffset;
	void *vaorig = ((uint8 *)va + size);
	dma_addr_t dma_handle = 0x0;
	/* packet will be the one added with osl_sec_dma_map() just before this call */

	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

	/* NOTE(review): the element is matched when its recorded va equals
	 * va + size — presumably the metadata sits immediately before the
	 * packet buffer; confirm against the caller's buffer layout.
	 */
	if (sec_mem_elem && sec_mem_elem->va == vaorig) {

		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		/* byte offset of the CMA buffer within its page */
		loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));

	} else {
		printf("%s: error orig va not found va = 0x%p \n",
			__FUNCTION__, vaorig);
	}
	return dma_handle;
}
1594 
1595 dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t * osh,void * va,uint size,int direction,void * p,hnddma_seg_map_t * dmah,void * ptr_cma_info,uint offset)1596 osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
1597 	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
1598 {
1599 
1600 	sec_mem_elem_t *sec_mem_elem;
1601 	struct page *pa_cma_page;
1602 	void *pa_cma_kmap_va = NULL;
1603 	uint buflen = 0;
1604 	dma_addr_t dma_handle = 0x0;
1605 	uint loffset;
1606 
1607 	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
1608 	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
1609 
1610 	sec_mem_elem->va = va;
1611 	sec_mem_elem->direction = direction;
1612 	pa_cma_page = sec_mem_elem->pa_cma_page;
1613 
1614 	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
1615 	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
1616 	* pa_cma_kmap_va += loffset;
1617 	*/
1618 
1619 	pa_cma_kmap_va = sec_mem_elem->vac;
1620 	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
1621 	buflen = size;
1622 
1623 	if (direction == DMA_TX) {
1624 		memcpy((uint8*)pa_cma_kmap_va+offset, va, size);
1625 
1626 		if (dmah) {
1627 			dmah->nsegs = 1;
1628 			dmah->origsize = buflen;
1629 		}
1630 	}
1631 	else
1632 	{
1633 		if ((p != NULL) && (dmah != NULL)) {
1634 			dmah->nsegs = 1;
1635 			dmah->origsize = buflen;
1636 		}
1637 		*(uint32 *)(pa_cma_kmap_va) = 0x0;
1638 	}
1639 
1640 	if (direction == DMA_RX) {
1641 		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
1642 	}
1643 		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
1644 			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1645 	if (dmah) {
1646 		dmah->segs[0].addr = dma_handle;
1647 		dmah->segs[0].length = buflen;
1648 	}
1649 	sec_mem_elem->dma_handle = dma_handle;
1650 	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
1651 	return dma_handle;
1652 }
1653 
1654 dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t * osh,void * va,uint size,int direction,void * p,hnddma_seg_map_t * map)1655 osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
1656 {
1657 
1658 	struct page *pa_cma_page;
1659 	phys_addr_t pa_cma;
1660 	dma_addr_t dma_handle = 0x0;
1661 	uint loffset;
1662 
1663 	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
1664 	pa_cma_page = phys_to_page(pa_cma);
1665 	loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
1666 
1667 	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
1668 		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1669 
1670 	return dma_handle;
1671 }
1672 
1673 void BCMFASTPATH
osl_sec_dma_unmap(osl_t * osh,dma_addr_t dma_handle,uint size,int direction,void * p,hnddma_seg_map_t * map,void * ptr_cma_info,uint offset)1674 osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
1675 void *p, hnddma_seg_map_t *map,	void *ptr_cma_info, uint offset)
1676 {
1677 	sec_mem_elem_t *sec_mem_elem;
1678 	void *pa_cma_kmap_va = NULL;
1679 	uint buflen = 0;
1680 	dma_addr_t pa_cma;
1681 	void *va;
1682 	int read_count = 0;
1683 	BCM_REFERENCE(buflen);
1684 	BCM_REFERENCE(read_count);
1685 
1686 	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
1687 	ASSERT(sec_mem_elem);
1688 
1689 	va = sec_mem_elem->va;
1690 	va = (uint8 *)va - offset;
1691 	pa_cma = sec_mem_elem->pa_cma;
1692 
1693 	if (direction == DMA_RX) {
1694 
1695 		if (p == NULL) {
1696 
1697 			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
1698 			* pa_cma_kmap_va += loffset;
1699 			*/
1700 
1701 			pa_cma_kmap_va = sec_mem_elem->vac;
1702 
1703 			do {
1704 				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
1705 
1706 				buflen = *(uint *)(pa_cma_kmap_va);
1707 				if (buflen)
1708 					break;
1709 
1710 				OSL_DELAY(1);
1711 				read_count++;
1712 			} while (read_count < 200);
1713 			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
1714 			memcpy(va, pa_cma_kmap_va, size);
1715 			/* kunmap_atomic(pa_cma_kmap_va); */
1716 		}
1717 	} else {
1718 		dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
1719 	}
1720 
1721 	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
1722 }
1723 
1724 void
osl_sec_dma_unmap_all(osl_t * osh,void * ptr_cma_info)1725 osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
1726 {
1727 
1728 	sec_mem_elem_t *sec_mem_elem;
1729 
1730 	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1731 
1732 	while (sec_mem_elem != NULL) {
1733 
1734 		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
1735 			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1736 		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
1737 
1738 		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1739 	}
1740 }
1741 
1742 static void
osl_sec_dma_init_consistent(osl_t * osh)1743 osl_sec_dma_init_consistent(osl_t *osh)
1744 {
1745 	int i;
1746 	void *temp_va = osh->contig_base_alloc_coherent_va;
1747 	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
1748 
1749 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1750 		osh->sec_cma_coherent[i].avail = TRUE;
1751 		osh->sec_cma_coherent[i].va = temp_va;
1752 		osh->sec_cma_coherent[i].pa = temp_pa;
1753 		temp_va = ((uint8 *)temp_va)+SEC_CMA_COHERENT_BLK;
1754 		temp_pa += SEC_CMA_COHERENT_BLK;
1755 	}
1756 }
1757 
1758 static void *
osl_sec_dma_alloc_consistent(osl_t * osh,uint size,uint16 align_bits,ulong * pap)1759 osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
1760 {
1761 
1762 	void *temp_va = NULL;
1763 	ulong temp_pa = 0;
1764 	int i;
1765 
1766 	if (size > SEC_CMA_COHERENT_BLK) {
1767 		printf("%s unsupported size\n", __FUNCTION__);
1768 		return NULL;
1769 	}
1770 
1771 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1772 		if (osh->sec_cma_coherent[i].avail == TRUE) {
1773 			temp_va = osh->sec_cma_coherent[i].va;
1774 			temp_pa = osh->sec_cma_coherent[i].pa;
1775 			osh->sec_cma_coherent[i].avail = FALSE;
1776 			break;
1777 		}
1778 	}
1779 
1780 	if (i == SEC_CMA_COHERENT_MAX)
1781 		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1782 			temp_va, (ulong)temp_pa, size);
1783 
1784 	*pap = (unsigned long)temp_pa;
1785 	return temp_va;
1786 }
1787 
1788 static void
osl_sec_dma_free_consistent(osl_t * osh,void * va,uint size,dmaaddr_t pa)1789 osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
1790 {
1791 	int i = 0;
1792 
1793 	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1794 		if (osh->sec_cma_coherent[i].va == va) {
1795 			osh->sec_cma_coherent[i].avail = TRUE;
1796 			break;
1797 		}
1798 	}
1799 	if (i == SEC_CMA_COHERENT_MAX)
1800 		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1801 			va, (ulong)pa, size);
1802 }
1803 #endif /* BCM_SECURE_DMA */
1804 
1805 /* timer apis */
1806 /* Note: All timer api's are thread unsafe and should be protected with locks by caller */
1807 
1808 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
1809 void
timer_cb_compat(struct timer_list * tl)1810 timer_cb_compat(struct timer_list *tl)
1811 {
1812 	timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
1813 	t->callback((ulong)t->arg);
1814 }
1815 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
1816 
1817 osl_timer_t *
osl_timer_init(osl_t * osh,const char * name,void (* fn)(void * arg),void * arg)1818 osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
1819 {
1820 	osl_timer_t *t;
1821 	BCM_REFERENCE(fn);
1822 	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
1823 		printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
1824 			(int)sizeof(osl_timer_t));
1825 		return (NULL);
1826 	}
1827 	bzero(t, sizeof(osl_timer_t));
1828 	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
1829 		printf("osl_timer_init: malloc failed\n");
1830 		MFREE(NULL, t, sizeof(osl_timer_t));
1831 		return (NULL);
1832 	}
1833 	t->set = TRUE;
1834 
1835 	init_timer_compat(t->timer, (linux_timer_fn)fn, arg);
1836 
1837 	return (t);
1838 }
1839 
1840 void
osl_timer_add(osl_t * osh,osl_timer_t * t,uint32 ms,bool periodic)1841 osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1842 {
1843 	if (t == NULL) {
1844 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1845 		return;
1846 	}
1847 	ASSERT(!t->set);
1848 
1849 	t->set = TRUE;
1850 	if (periodic) {
1851 		printf("Periodic timers are not supported by Linux timer apis\n");
1852 	}
1853 	timer_expires(t->timer) = jiffies + ms*HZ/1000;
1854 
1855 	add_timer(t->timer);
1856 
1857 	return;
1858 }
1859 
1860 void
osl_timer_update(osl_t * osh,osl_timer_t * t,uint32 ms,bool periodic)1861 osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1862 {
1863 	if (t == NULL) {
1864 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1865 		return;
1866 	}
1867 	if (periodic) {
1868 		printf("Periodic timers are not supported by Linux timer apis\n");
1869 	}
1870 	t->set = TRUE;
1871 	timer_expires(t->timer) = jiffies + ms*HZ/1000;
1872 
1873 	mod_timer(t->timer, timer_expires(t->timer));
1874 
1875 	return;
1876 }
1877 
1878 /*
1879  * Return TRUE if timer successfully deleted, FALSE if still pending
1880  */
1881 bool
osl_timer_del(osl_t * osh,osl_timer_t * t)1882 osl_timer_del(osl_t *osh, osl_timer_t *t)
1883 {
1884 	if (t == NULL) {
1885 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1886 		return (FALSE);
1887 	}
1888 	if (t->set) {
1889 		t->set = FALSE;
1890 		if (t->timer) {
1891 			del_timer(t->timer);
1892 			MFREE(NULL, t->timer, sizeof(struct timer_list));
1893 		}
1894 		MFREE(NULL, t, sizeof(osl_timer_t));
1895 	}
1896 	return (TRUE);
1897 }
1898 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1899 int
kernel_read_compat(struct file * file,loff_t offset,char * addr,unsigned long count)1900 kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
1901 {
1902 	return (int)kernel_read(file, addr, (size_t)count, &offset);
1903 }
1904 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
1905 
1906 void *
osl_spin_lock_init(osl_t * osh)1907 osl_spin_lock_init(osl_t *osh)
1908 {
1909 	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
1910 	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
1911 	/* and this results in kernel asserts in internal builds */
1912 	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
1913 	if (lock)
1914 		spin_lock_init(lock);
1915 	return ((void *)lock);
1916 }
1917 
1918 void
osl_spin_lock_deinit(osl_t * osh,void * lock)1919 osl_spin_lock_deinit(osl_t *osh, void *lock)
1920 {
1921 	if (lock)
1922 		MFREE(osh, lock, sizeof(spinlock_t) + 4);
1923 }
1924 
1925 unsigned long
osl_spin_lock(void * lock)1926 osl_spin_lock(void *lock)
1927 {
1928 	unsigned long flags = 0;
1929 
1930 	if (lock)
1931 		spin_lock_irqsave((spinlock_t *)lock, flags);
1932 
1933 	return flags;
1934 }
1935 
1936 void
osl_spin_unlock(void * lock,unsigned long flags)1937 osl_spin_unlock(void *lock, unsigned long flags)
1938 {
1939 	if (lock)
1940 		spin_unlock_irqrestore((spinlock_t *)lock, flags);
1941 }
1942 
1943 #ifdef USE_DMA_LOCK
/* Acquire the per-osh DMA lock. In hard-IRQ context (or with IRQs already
 * disabled) the BH-disabling variant must not be used, so take the plain
 * lock; otherwise take spin_lock_bh() and record that choice so
 * osl_dma_unlock() releases symmetrically.
 */
static void
osl_dma_lock(osl_t *osh)
{
	if (likely(in_irq() || irqs_disabled())) {
		spin_lock(&osh->dma_lock);
	} else {
		spin_lock_bh(&osh->dma_lock);
		/* flag set only while the lock is held, so it is protected */
		osh->dma_lock_bh = TRUE;
	}
}
1954 
/* Release the per-osh DMA lock, mirroring how osl_dma_lock() acquired it. */
static void
osl_dma_unlock(osl_t *osh)
{
	if (unlikely(osh->dma_lock_bh)) {
		/* clear the flag before dropping the lock that protects it */
		osh->dma_lock_bh = FALSE;
		spin_unlock_bh(&osh->dma_lock);
	} else {
		spin_unlock(&osh->dma_lock);
	}
}
1965 
1966 static void
osl_dma_lock_init(osl_t * osh)1967 osl_dma_lock_init(osl_t *osh)
1968 {
1969 	spin_lock_init(&osh->dma_lock);
1970 	osh->dma_lock_bh = FALSE;
1971 }
1972 #endif /* USE_DMA_LOCK */
1973