xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/linux_osl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Linux OS Independent Layer
3  *
4  * Copyright (C) 2020, Broadcom.
5  *
6  *      Unless you and Broadcom execute a separate written software license
7  * agreement governing use of this software, this software is licensed to you
8  * under the terms of the GNU General Public License version 2 (the "GPL"),
9  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10  * following added to such license:
11  *
12  *      As a special exception, the copyright holders of this software give you
13  * permission to link this software with independent modules, and to copy and
14  * distribute the resulting executable under terms of your choice, provided that
15  * you also meet, for each linked independent module, the terms and conditions of
16  * the license of that module.  An independent module is a module which is not
17  * derived from this software.  The special exception does not apply to any
18  * modifications of the software.
19  *
20  *
21  * <<Broadcom-WL-IPTag/Dual:>>
22  */
23 
24 #define LINUX_PORT
25 
26 #include <typedefs.h>
27 #include <bcmendian.h>
28 #include <linuxver.h>
29 #include <bcmdefs.h>
30 
31 #if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
32 #include <asm/cacheflush.h>
33 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
34 
35 #include <linux/random.h>
36 
37 #include <osl.h>
38 #include <bcmutils.h>
39 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <pcicfg.h>
42 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
43 #include <asm-generic/pci-dma-compat.h>
44 #endif
45 
46 #if defined(BCMASSERT_LOG) && !defined(OEM_ANDROID)
47 #include <bcm_assert_log.h>
48 #endif
49 
50 #include <linux/fs.h>
51 
52 #ifdef BCM_OBJECT_TRACE
53 #include <bcmutils.h>
54 #endif /* BCM_OBJECT_TRACE */
55 #include "linux_osl_priv.h"
56 
57 #define PCI_CFG_RETRY		10	/* PR15065: retry count for pci cfg accesses */
58 
59 #define DUMPBUFSZ 1024
60 
61 #ifdef CUSTOMER_HW4_DEBUG
62 uint32 g_assert_type = 1; /* By default, do not cause a kernel panic */
63 #else
64 uint32 g_assert_type = 0; /* By default, cause a kernel panic */
65 #endif /* CUSTOMER_HW4_DEBUG */
66 
67 module_param(g_assert_type, int, 0);
68 
69 #if defined(BCMSLTGT)
70 /* !!!make sure htclkratio is not 0!!! */
71 extern uint htclkratio;
72 #endif
73 
74 #ifdef USE_DMA_LOCK
75 static void osl_dma_lock(osl_t *osh);
76 static void osl_dma_unlock(osl_t *osh);
77 static void osl_dma_lock_init(osl_t *osh);
78 
79 #define DMA_LOCK(osh)		osl_dma_lock(osh)
80 #define DMA_UNLOCK(osh)		osl_dma_unlock(osh)
81 #define DMA_LOCK_INIT(osh)	osl_dma_lock_init(osh);
82 #else
83 #define DMA_LOCK(osh)		do { /* noop */ } while(0)
84 #define DMA_UNLOCK(osh)		do { /* noop */ } while(0)
85 #define DMA_LOCK_INIT(osh)	do { /* noop */ } while(0)
86 #endif /* USE_DMA_LOCK */
87 
88 static int16 linuxbcmerrormap[] =
89 {	0,				/* 0 */
90 	-EINVAL,		/* BCME_ERROR */
91 	-EINVAL,		/* BCME_BADARG */
92 	-EINVAL,		/* BCME_BADOPTION */
93 	-EINVAL,		/* BCME_NOTUP */
94 	-EINVAL,		/* BCME_NOTDOWN */
95 	-EINVAL,		/* BCME_NOTAP */
96 	-EINVAL,		/* BCME_NOTSTA */
97 	-EINVAL,		/* BCME_BADKEYIDX */
98 	-EINVAL,		/* BCME_RADIOOFF */
99 	-EINVAL,		/* BCME_NOTBANDLOCKED */
100 	-EINVAL,		/* BCME_NOCLK */
101 	-EINVAL,		/* BCME_BADRATESET */
102 	-EINVAL,		/* BCME_BADBAND */
103 	-E2BIG,			/* BCME_BUFTOOSHORT */
104 	-E2BIG,			/* BCME_BUFTOOLONG */
105 	-EBUSY,			/* BCME_BUSY */
106 	-EINVAL,		/* BCME_NOTASSOCIATED */
107 	-EINVAL,		/* BCME_BADSSIDLEN */
108 	-EINVAL,		/* BCME_OUTOFRANGECHAN */
109 	-EINVAL,		/* BCME_BADCHAN */
110 	-EFAULT,		/* BCME_BADADDR */
111 	-ENOMEM,		/* BCME_NORESOURCE */
112 	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
113 	-EMSGSIZE,		/* BCME_BADLENGTH */
114 	-EINVAL,		/* BCME_NOTREADY */
115 	-EPERM,			/* BCME_EPERM */
116 	-ENOMEM,		/* BCME_NOMEM */
117 	-EINVAL,		/* BCME_ASSOCIATED */
118 	-ERANGE,		/* BCME_RANGE */
119 	-EINVAL,		/* BCME_NOTFOUND */
120 	-EINVAL,		/* BCME_WME_NOT_ENABLED */
121 	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
122 	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
123 	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
124 	-EIO,			/* BCME_SDIO_ERROR */
125 	-ENODEV,		/* BCME_DONGLE_DOWN */
126 	-EINVAL,		/* BCME_VERSION */
127 	-EIO,			/* BCME_TXFAIL */
128 	-EIO,			/* BCME_RXFAIL */
129 	-ENODEV,		/* BCME_NODEVICE */
130 	-EINVAL,		/* BCME_NMODE_DISABLED */
131 	-ENODATA,		/* BCME_NONRESIDENT */
132 	-EINVAL,		/* BCME_SCANREJECT */
133 	-EINVAL,		/* BCME_USAGE_ERROR */
134 	-EIO,			/* BCME_IOCTL_ERROR */
135 	-EIO,			/* BCME_SERIAL_PORT_ERR */
136 	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
137 	-EIO,			/* BCME_DECERR */
138 	-EIO,			/* BCME_ENCERR */
139 	-EIO,			/* BCME_MICERR */
140 	-ERANGE,		/* BCME_REPLAY */
141 	-EINVAL,		/* BCME_IE_NOTFOUND */
142 	-EINVAL,		/* BCME_DATA_NOTFOUND */
143 	-EINVAL,		/* BCME_NOT_GC */
144 	-EINVAL,		/* BCME_PRS_REQ_FAILED */
145 	-EINVAL,		/* BCME_NO_P2P_SE */
146 	-EINVAL,		/* BCME_NOA_PND */
147 	-EINVAL,		/* BCME_FRAG_Q_FAILED */
148 	-EINVAL,		/* BCME_GET_AF_FAILED */
149 	-EINVAL,		/* BCME_MSCH_NOTREADY */
150 	-EINVAL,		/* BCME_IOV_LAST_CMD */
151 	-EINVAL,		/* BCME_MINIPMU_CAL_FAIL */
152 	-EINVAL,		/* BCME_RCAL_FAIL */
153 	-EINVAL,		/* BCME_LPF_RCCAL_FAIL */
154 	-EINVAL,		/* BCME_DACBUF_RCCAL_FAIL */
155 	-EINVAL,		/* BCME_VCOCAL_FAIL */
156 	-EINVAL,		/* BCME_BANDLOCKED */
157 	-EINVAL,		/* BCME_BAD_IE_DATA */
158 	-EINVAL,		/* BCME_REG_FAILED */
159 	-EINVAL,		/* BCME_NOCHAN */
160 	-EINVAL,		/* BCME_PKTTOSS */
161 	-EINVAL,		/* BCME_DNGL_DEVRESET */
162 	-EINVAL,		/* BCME_ROAM */
163 	-EOPNOTSUPP,		/* BCME_NO_SIG_FILE */
164 
165 /* When a new error code is added to bcmutils.h, add an OS-
166  * specific error translation here as well.
167  */
168 /* check if BCME_LAST changed since the last time this function was updated */
169 #if BCME_LAST != BCME_NO_SIG_FILE
170 #error "You need to add an OS error translation to the linuxbcmerrormap \
171 	for the new error code defined in bcmutils.h"
172 #endif
173 };
174 uint lmtest = FALSE;
175 
176 #ifdef DHD_MAP_LOGGING
177 #define DHD_MAP_LOG_SIZE 2048
178 
179 typedef struct dhd_map_item {
180 	dmaaddr_t pa;		/* DMA address (physical) */
181 	uint64 ts_nsec;		/* timestamp: nsec */
182 	uint32 size;		/* mapping size */
183 	uint8 rsvd[4];		/* reserved for future use */
184 } dhd_map_item_t;
185 
186 typedef struct dhd_map_record {
187 	uint32 items;		/* number of total items */
188 	uint32 idx;		/* current index of metadata */
189 	dhd_map_item_t map[0];	/* metadata storage */
190 } dhd_map_log_t;
191 
192 void
193 osl_dma_map_dump(osl_t *osh)
194 {
195 	dhd_map_log_t *map_log, *unmap_log;
196 	uint64 ts_sec, ts_usec;
197 
198 	map_log = (dhd_map_log_t *)(osh->dhd_map_log);
199 	unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
200 	osl_get_localtime(&ts_sec, &ts_usec);
201 
202 	if (map_log && unmap_log) {
203 		printf("%s: map_idx=%d unmap_idx=%d "
204 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
205 			map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
206 			(unsigned long)ts_usec);
207 		printf("%s: dhd_map_log(pa)=0x%llx size=%d,"
208 			" dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
209 			(uint64)__virt_to_phys((ulong)(map_log->map)),
210 			(uint32)(sizeof(dhd_map_item_t) * map_log->items),
211 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
212 			(uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
213 	}
214 }
215 
216 static void *
217 osl_dma_map_log_init(uint32 item_len)
218 {
219 	dhd_map_log_t *map_log;
220 	gfp_t flags;
221 	uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
222 		(item_len * sizeof(dhd_map_item_t)));
223 
224 	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
225 	map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
226 	if (map_log) {
227 		memset(map_log, 0, alloc_size);
228 		map_log->items = item_len;
229 		map_log->idx = 0;
230 	}
231 
232 	return (void *)map_log;
233 }
234 
235 static void
236 osl_dma_map_log_deinit(osl_t *osh)
237 {
238 	if (osh->dhd_map_log) {
239 		kfree(osh->dhd_map_log);
240 		osh->dhd_map_log = NULL;
241 	}
242 
243 	if (osh->dhd_unmap_log) {
244 		kfree(osh->dhd_unmap_log);
245 		osh->dhd_unmap_log = NULL;
246 	}
247 }
248 
249 static void
250 osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
251 {
252 	dhd_map_log_t *log = (dhd_map_log_t *)handle;
253 	uint32 idx;
254 
255 	if (log == NULL) {
256 		printf("%s: log is NULL\n", __FUNCTION__);
257 		return;
258 	}
259 
260 	idx = log->idx;
261 	log->map[idx].ts_nsec = osl_localtime_ns();
262 	log->map[idx].pa = pa;
263 	log->map[idx].size = len;
264 	log->idx = (idx + 1) % log->items;
265 }
266 #endif /* DHD_MAP_LOGGING */
267 
268 /* translate bcmerrors into linux errors */
269 int
270 osl_error(int bcmerror)
271 {
272 	if (bcmerror > 0)
273 		bcmerror = 0;
274 	else if (bcmerror < BCME_LAST)
275 		bcmerror = BCME_ERROR;
276 
277 	/* Array bounds covered by ASSERT in osl_attach */
278 	return linuxbcmerrormap[-bcmerror];
279 }
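/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * It shows how a caller would typically use osl_error(): a BCME_xx status
 * from bcmutils.h is translated into a negative Linux errno via
 * linuxbcmerrormap before being returned to the kernel. Variable names are
 * hypothetical.
 *
 *	int bcmstatus = BCME_NOMEM;
 *	int err = osl_error(bcmstatus);	// yields -ENOMEM
 *	return err;
 */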
280 
281 osl_t *
282 osl_attach(void *pdev, uint bustype, bool pkttag
283 #ifdef SHARED_OSL_CMN
284 	, void **osl_cmn
285 #endif /* SHARED_OSL_CMN */
286 )
287 {
288 #ifndef SHARED_OSL_CMN
289 	void **osl_cmn = NULL;
290 #endif /* SHARED_OSL_CMN */
291 	osl_t *osh;
292 	gfp_t flags;
293 
294 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
295 	if (!(osh = kmalloc(sizeof(osl_t), flags)))
296 		return osh;
297 
298 	ASSERT(osh);
299 
300 	bzero(osh, sizeof(osl_t));
301 
302 	if (osl_cmn == NULL || *osl_cmn == NULL) {
303 		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
304 			kfree(osh);
305 			return NULL;
306 		}
307 		bzero(osh->cmn, sizeof(osl_cmn_t));
308 		if (osl_cmn)
309 			*osl_cmn = osh->cmn;
310 		atomic_set(&osh->cmn->malloced, 0);
311 		osh->cmn->dbgmem_list = NULL;
312 		spin_lock_init(&(osh->cmn->dbgmem_lock));
313 
314 #ifdef BCMDBG_PKT
315 		spin_lock_init(&(osh->cmn->pktlist_lock));
316 #endif
317 		spin_lock_init(&(osh->cmn->pktalloc_lock));
318 
319 	} else {
320 		osh->cmn = *osl_cmn;
321 	}
322 	atomic_add(1, &osh->cmn->refcount);
323 
324 	bcm_object_trace_init();
325 	/* Check that error map has the right number of entries in it */
326 	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
327 	osh->failed = 0;
328 	osh->pdev = pdev;
329 	osh->pub.pkttag = pkttag;
330 	osh->bustype = bustype;
331 	osh->magic = OS_HANDLE_MAGIC;
332 
333 	switch (bustype) {
334 		case PCI_BUS:
335 		case SI_BUS:
336 			osh->pub.mmbus = TRUE;
337 			break;
338 		case SDIO_BUS:
339 		case USB_BUS:
340 		case SPI_BUS:
341 		case RPC_BUS:
342 			osh->pub.mmbus = FALSE;
343 			break;
344 		default:
345 			ASSERT(FALSE);
346 			break;
347 	}
348 
349 #ifdef BCMDBG_CTRACE
350 	spin_lock_init(&osh->ctrace_lock);
351 	INIT_LIST_HEAD(&osh->ctrace_list);
352 	osh->ctrace_num = 0;
353 #endif /* BCMDBG_CTRACE */
354 
355 	DMA_LOCK_INIT(osh);
356 
357 #ifdef BCMDBG_ASSERT
358 	if (pkttag) {
359 		struct sk_buff *skb;
360 		BCM_REFERENCE(skb);
361 		ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
362 	}
363 #endif
364 
365 #ifdef DHD_MAP_LOGGING
366 	osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
367 	if (osh->dhd_map_log == NULL) {
368 		printf("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
369 	}
370 
371 	osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
372 	if (osh->dhd_unmap_log == NULL) {
373 		printf("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
374 	}
375 #endif /* DHD_MAP_LOGGING */
376 
377 	return osh;
378 }
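/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * A bus driver's probe path typically pairs osl_attach() with osl_detach();
 * the exact signature depends on SHARED_OSL_CMN. Assuming the non-shared
 * variant and a PCIe device, a hypothetical caller looks like:
 *
 *	osl_t *osh = osl_attach(pdev, PCI_BUS, FALSE);
 *	if (osh == NULL)
 *		return -ENOMEM;
 *	...
 *	osl_detach(osh);
 */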
379 
380 void osl_set_bus_handle(osl_t *osh, void *bus_handle)
381 {
382 	osh->bus_handle = bus_handle;
383 }
384 
385 void* osl_get_bus_handle(osl_t *osh)
386 {
387 	return osh->bus_handle;
388 }
389 
390 #if defined(AXI_TIMEOUTS_NIC)
391 void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
392 {
393 	if (osh) {
394 		osh->bpt_cb = (bpt_cb_fn)bpt_cb;
395 		osh->sih = bpt_ctx;
396 	}
397 }
398 #endif	/* AXI_TIMEOUTS_NIC */
399 
400 void
401 osl_detach(osl_t *osh)
402 {
403 	if (osh == NULL)
404 		return;
405 
406 #ifdef BCMDBG_MEM
407 	if (MEMORY_LEFTOVER(osh)) {
408 		static char dumpbuf[DUMPBUFSZ];
409 		struct bcmstrbuf b;
410 
411 		printf("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh));
412 		bcm_binit(&b, dumpbuf, DUMPBUFSZ);
413 		MALLOC_DUMP(osh, &b);
414 		printf("%s", b.origbuf);
415 	}
416 #endif
417 
418 	bcm_object_trace_deinit();
419 
420 #ifdef DHD_MAP_LOGGING
421 	osl_dma_map_log_deinit(osh);
422 #endif /* DHD_MAP_LOGGING */
423 
424 	ASSERT(osh->magic == OS_HANDLE_MAGIC);
425 	atomic_sub(1, &osh->cmn->refcount);
426 	if (atomic_read(&osh->cmn->refcount) == 0) {
427 			kfree(osh->cmn);
428 	}
429 	kfree(osh);
430 }
431 
432 /* APIs to set/get specific quirks in OSL layer */
433 void
434 BCMFASTPATH(osl_flag_set)(osl_t *osh, uint32 mask)
435 {
436 	osh->flags |= mask;
437 }
438 
439 void
440 osl_flag_clr(osl_t *osh, uint32 mask)
441 {
442 	osh->flags &= ~mask;
443 }
444 
445 bool
446 osl_is_flag_set(osl_t *osh, uint32 mask)
447 {
448 	return (osh->flags & mask);
449 }
450 
451 #if (defined(BCMPCIE) && defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
452 
453 inline void
454 BCMFASTPATH(osl_cache_flush)(void *va, uint size)
455 {
456 	if (size > 0)
457 		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
458 			DMA_TO_DEVICE);
459 }
460 
461 inline void
462 BCMFASTPATH(osl_cache_inv)(void *va, uint size)
463 {
464 	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
465 }
466 
467 inline void
468 BCMFASTPATH(osl_prefetch)(const void *ptr)
469 {
470 	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
471 }
472 
473 #endif /* BCMPCIE && __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
474 
475 uint32
476 osl_pci_read_config(osl_t *osh, uint offset, uint size)
477 {
478 	uint val = 0;
479 	uint retry = PCI_CFG_RETRY;	 /* PR15065: faulty cardbus controller bug */
480 
481 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
482 
483 	/* only 4byte access supported */
484 	ASSERT(size == 4);
485 
486 	do {
487 		pci_read_config_dword(osh->pdev, offset, &val);
488 		if (val != 0xffffffff)
489 			break;
490 	} while (retry--);
491 
492 #ifdef BCMDBG
493 	if (retry < PCI_CFG_RETRY)
494 		printf("PCI CONFIG READ access to %d required %d retries\n", offset,
495 		       (PCI_CFG_RETRY - retry));
496 #endif /* BCMDBG */
497 
498 	return (val);
499 }
500 
501 void
502 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
503 {
504 	uint retry = PCI_CFG_RETRY;	 /* PR15065: faulty cardbus controller bug */
505 
506 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
507 
508 	/* only 4byte access supported */
509 	ASSERT(size == 4);
510 
511 	do {
512 		pci_write_config_dword(osh->pdev, offset, val);
513 		/* PR15065: PCI_BAR0_WIN is believed to be the only pci cfg write that can occur
514 		 * when dma activity is possible
515 		 */
516 		if (offset != PCI_BAR0_WIN)
517 			break;
518 		if (osl_pci_read_config(osh, offset, size) == val)
519 			break;
520 	} while (retry--);
521 
522 #ifdef BCMDBG
523 	if (retry < PCI_CFG_RETRY)
524 		printf("PCI CONFIG WRITE access to %d required %d retries\n", offset,
525 		       (PCI_CFG_RETRY - retry));
526 #endif /* BCMDBG */
527 }
528 
529 #ifdef BCMPCIE
530 /* return bus # for the pci device pointed by osh->pdev */
531 uint
532 osl_pci_bus(osl_t *osh)
533 {
534 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
535 
536 #if defined(__ARM_ARCH_7A__)
537 	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
538 #else
539 	return ((struct pci_dev *)osh->pdev)->bus->number;
540 #endif
541 }
542 
543 /* return slot # for the pci device pointed by osh->pdev */
544 uint
545 osl_pci_slot(osl_t *osh)
546 {
547 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
548 
549 #if defined(__ARM_ARCH_7A__)
550 	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
551 #else
552 	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
553 #endif
554 }
555 
556 /* return domain # for the pci device pointed by osh->pdev */
557 uint
558 osl_pcie_domain(osl_t *osh)
559 {
560 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
561 
562 	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
563 }
564 
565 /* return bus # for the pci device pointed by osh->pdev */
566 uint
567 osl_pcie_bus(osl_t *osh)
568 {
569 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
570 
571 	return ((struct pci_dev *)osh->pdev)->bus->number;
572 }
573 
574 /* return the pci device pointed by osh->pdev */
575 struct pci_dev *
576 osl_pci_device(osl_t *osh)
577 {
578 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
579 
580 	return osh->pdev;
581 }
582 #endif
583 
584 #ifdef BCMDBG_MEM
585 /* In BCMDBG_MEM configurations osl_malloc is only used internally in
586  * the implementation of osl_debug_malloc.  Because we are using the GCC
587  * -Wstrict-prototypes compile option, we must always have a prototype
588  * for a global/external function.  So make osl_malloc static in
589  * the BCMDBG_MEM case.
590  */
591 static
592 #endif
593 void *
594 osl_malloc(osl_t *osh, uint size)
595 {
596 	void *addr;
597 	gfp_t flags;
598 
599 	/* only ASSERT if osh is defined */
600 	if (osh)
601 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
602 #ifdef CONFIG_DHD_USE_STATIC_BUF
603 	if (bcm_static_buf)
604 	{
605 		unsigned long irq_flags;
606 		int i = 0;
607 		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
608 		{
609 			OSL_STATIC_BUF_LOCK(&bcm_static_buf->static_lock, irq_flags);
610 
611 			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
612 			{
613 				if (bcm_static_buf->buf_use[i] == 0)
614 					break;
615 			}
616 
617 			if (i == STATIC_BUF_MAX_NUM)
618 			{
619 				OSL_STATIC_BUF_UNLOCK(&bcm_static_buf->static_lock, irq_flags);
620 				printf("all static buffers in use!\n");
621 				goto original;
622 			}
623 
624 			bcm_static_buf->buf_use[i] = 1;
625 			OSL_STATIC_BUF_UNLOCK(&bcm_static_buf->static_lock, irq_flags);
626 
627 			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
628 			if (osh)
629 				atomic_add(size, &osh->cmn->malloced);
630 
631 			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
632 		}
633 	}
634 original:
635 #endif /* CONFIG_DHD_USE_STATIC_BUF */
636 
637 	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
638 	if ((addr = kmalloc(size, flags)) == NULL) {
639 		if (osh)
640 			osh->failed++;
641 		return (NULL);
642 	}
643 	if (osh && osh->cmn)
644 		atomic_add(size, &osh->cmn->malloced);
645 
646 	return (addr);
647 }
648 
649 #ifndef BCMDBG_MEM
650 void *
651 osl_mallocz(osl_t *osh, uint size)
652 {
653 	void *ptr;
654 
655 	ptr = osl_malloc(osh, size);
656 
657 	if (ptr != NULL) {
658 		bzero(ptr, size);
659 	}
660 
661 	return ptr;
662 }
663 #endif
664 
665 #ifdef BCMDBG_MEM
666 /* In BCMDBG_MEM configurations osl_mfree is only used internally in
667  * the implementation of osl_debug_mfree.  Because we are using the GCC
668  * -Wstrict-prototypes compile option, we must always have a prototype
669  * for a global/external function.  So make osl_mfree static in
670  * the BCMDBG_MEM case.
671  */
672 static
673 #endif
674 void
675 osl_mfree(osl_t *osh, void *addr, uint size)
676 {
677 #ifdef CONFIG_DHD_USE_STATIC_BUF
678 	unsigned long flags;
679 
680 	if (addr == NULL) {
681 		return;
682 	}
683 
684 	if (bcm_static_buf)
685 	{
686 		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
687 			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
688 		{
689 			int buf_idx = 0;
690 
691 			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
692 
693 			OSL_STATIC_BUF_LOCK(&bcm_static_buf->static_lock, flags);
694 			bcm_static_buf->buf_use[buf_idx] = 0;
695 			OSL_STATIC_BUF_UNLOCK(&bcm_static_buf->static_lock, flags);
696 
697 			if (osh && osh->cmn) {
698 				ASSERT(osh->magic == OS_HANDLE_MAGIC);
699 				atomic_sub(size, &osh->cmn->malloced);
700 			}
701 			return;
702 		}
703 	}
704 #endif /* CONFIG_DHD_USE_STATIC_BUF */
705 	if (osh && osh->cmn) {
706 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
707 
708 		ASSERT(size <= osl_malloced(osh));
709 
710 		atomic_sub(size, &osh->cmn->malloced);
711 	}
712 	kfree(addr);
713 }
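/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * Allocations are size-accounted per osh (osh->cmn->malloced), so a free
 * must pass the same size that was allocated. With the MALLOC()/MFREE()
 * wrappers (defined in osl.h and used elsewhere in this file), a
 * hypothetical caller looks like:
 *
 *	void *buf = MALLOC(osh, len);	// atomic_add(len, &osh->cmn->malloced)
 *	if (buf != NULL)
 *		MFREE(osh, buf, len);	// atomic_sub(len, ...), then kfree()
 */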
714 
715 #ifdef BCMDBG_MEM
716 /* In BCMDBG_MEM configurations osl_vmalloc is only used internally in
717  * the implementation of osl_debug_vmalloc.  Because we are using the GCC
718  * -Wstrict-prototypes compile option, we must always have a prototype
719  * for a global/external function.  So make osl_vmalloc static in
720  * the BCMDBG_MEM case.
721  */
722 static
723 #endif
724 void *
725 osl_vmalloc(osl_t *osh, uint size)
726 {
727 	void *addr;
728 
729 	/* only ASSERT if osh is defined */
730 	if (osh)
731 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
732 	if ((addr = vmalloc(size)) == NULL) {
733 		if (osh)
734 			osh->failed++;
735 		return (NULL);
736 	}
737 	if (osh && osh->cmn)
738 		atomic_add(size, &osh->cmn->malloced);
739 
740 	return (addr);
741 }
742 
743 #ifndef BCMDBG_MEM
744 void *
745 osl_vmallocz(osl_t *osh, uint size)
746 {
747 	void *ptr;
748 
749 	ptr = osl_vmalloc(osh, size);
750 
751 	if (ptr != NULL) {
752 		bzero(ptr, size);
753 	}
754 
755 	return ptr;
756 }
757 #endif
758 
759 #ifdef BCMDBG_MEM
760 /* In BCMDBG_MEM configurations osl_vmfree is only used internally in
761  * the implementation of osl_debug_vmfree.  Because we are using the GCC
762  * -Wstrict-prototypes compile option, we must always have a prototype
763  * for a global/external function.  So make osl_vmfree static in
764  * the BCMDBG_MEM case.
765  */
766 static
767 #endif
768 void
769 osl_vmfree(osl_t *osh, void *addr, uint size)
770 {
771 	if (osh && osh->cmn) {
772 		ASSERT(osh->magic == OS_HANDLE_MAGIC);
773 
774 		ASSERT(size <= osl_malloced(osh));
775 
776 		atomic_sub(size, &osh->cmn->malloced);
777 	}
778 	vfree(addr);
779 }
780 
781 uint
782 osl_check_memleak(osl_t *osh)
783 {
784 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
785 	if (atomic_read(&osh->cmn->refcount) == 1)
786 		return (atomic_read(&osh->cmn->malloced));
787 	else
788 		return 0;
789 }
790 
791 uint
792 osl_malloced(osl_t *osh)
793 {
794 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
795 	return (atomic_read(&osh->cmn->malloced));
796 }
797 
798 uint
799 osl_malloc_failed(osl_t *osh)
800 {
801 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
802 	return (osh->failed);
803 }
804 
805 #ifdef BCMDBG_MEM
806 void *
807 osl_debug_malloc(osl_t *osh, uint size, int line, const char* file)
808 {
809 	bcm_mem_link_t *p;
810 	const char* basename;
811 	unsigned long flags = 0;
812 	if (!size) {
813 		printf("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__, file, line);
814 		ASSERT(0);
815 	}
816 
817 	if ((p = (bcm_mem_link_t*)osl_malloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL) {
818 		return (NULL);
819 	}
820 
821 	if (osh) {
822 		OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
823 	}
824 
825 	p->size = size;
826 	p->line = line;
827 	p->osh = (void *)osh;
828 
829 	basename = strrchr(file, '/');
830 	/* skip the '/' */
831 	if (basename)
832 		basename++;
833 
834 	if (!basename)
835 		basename = file;
836 
837 	strlcpy(p->file, basename, sizeof(p->file));
838 
839 	/* link this block */
840 	if (osh) {
841 		p->prev = NULL;
842 		p->next = osh->cmn->dbgmem_list;
843 		if (p->next)
844 			p->next->prev = p;
845 		osh->cmn->dbgmem_list = p;
846 		OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
847 	}
848 
849 	return p + 1;
850 }
851 
852 void *
853 osl_debug_mallocz(osl_t *osh, uint size, int line, const char* file)
854 {
855 	void *ptr;
856 
857 	ptr = osl_debug_malloc(osh, size, line, file);
858 
859 	if (ptr != NULL) {
860 		bzero(ptr, size);
861 	}
862 
863 	return ptr;
864 }
865 
866 void
867 osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, const char* file)
868 {
869 	bcm_mem_link_t *p;
870 	unsigned long flags = 0;
871 
872 	ASSERT(osh == NULL || osh->magic == OS_HANDLE_MAGIC);
873 
874 	if (addr == NULL) {
875 		return;
876 	}
877 
878 	p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
879 	if (p->size == 0) {
880 		printf("osl_debug_mfree: double free on addr %p size %d at line %d file %s\n",
881 			addr, size, line, file);
882 		prhex("bcm_mem_link_t", (void *)p, sizeof(*p));
883 		ASSERT(p->size);
884 		return;
885 	}
886 
887 	if (p->size != size) {
888 		printf("%s: dealloc size does not match alloc size\n", __FUNCTION__);
889 		printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
890 		printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
891 		prhex("bcm_mem_link_t", (void *)p, sizeof(*p));
892 		ASSERT(p->size == size);
893 		return;
894 	}
895 
896 	if (osh && ((osl_t*)p->osh)->cmn != osh->cmn) {
897 		printf("osl_debug_mfree: alloc osh %p does not match dealloc osh %p\n",
898 			((osl_t*)p->osh)->cmn, osh->cmn);
899 		printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
900 		printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
901 		prhex("bcm_mem_link_t", (void *)p, sizeof(*p));
902 		ASSERT(((osl_t*)p->osh)->cmn == osh->cmn);
903 		return;
904 	}
905 
906 	/* unlink this block */
907 	if (osh && osh->cmn) {
908 		OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
909 		if (p->prev)
910 			p->prev->next = p->next;
911 		if (p->next)
912 			p->next->prev = p->prev;
913 		if (osh->cmn->dbgmem_list == p)
914 			osh->cmn->dbgmem_list = p->next;
915 		p->next = p->prev = NULL;
916 	}
917 	p->size = 0;
918 
919 	if (osh && osh->cmn) {
920 		OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
921 	}
922 	osl_mfree(osh, p, size + sizeof(bcm_mem_link_t));
923 }
924 
925 void *
926 osl_debug_vmalloc(osl_t *osh, uint size, int line, const char* file)
927 {
928 	bcm_mem_link_t *p;
929 	const char* basename;
930 	unsigned long flags = 0;
931 	if (!size) {
932 		printf("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__, file, line);
933 		ASSERT(0);
934 	}
935 
936 	if ((p = (bcm_mem_link_t*)osl_vmalloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL) {
937 		return (NULL);
938 	}
939 
940 	if (osh) {
941 		OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
942 	}
943 
944 	p->size = size;
945 	p->line = line;
946 	p->osh = (void *)osh;
947 
948 	basename = strrchr(file, '/');
949 	/* skip the '/' */
950 	if (basename)
951 		basename++;
952 
953 	if (!basename)
954 		basename = file;
955 
956 	strlcpy(p->file, basename, sizeof(p->file));
957 
958 	/* link this block */
959 	if (osh) {
960 		p->prev = NULL;
961 		p->next = osh->cmn->dbgvmem_list;
962 		if (p->next)
963 			p->next->prev = p;
964 		osh->cmn->dbgvmem_list = p;
965 		OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
966 	}
967 
968 	return p + 1;
969 }
970 
971 void *
972 osl_debug_vmallocz(osl_t *osh, uint size, int line, const char* file)
973 {
974 	void *ptr;
975 
976 	ptr = osl_debug_vmalloc(osh, size, line, file);
977 
978 	if (ptr != NULL) {
979 		bzero(ptr, size);
980 	}
981 
982 	return ptr;
983 }
984 
985 void
986 osl_debug_vmfree(osl_t *osh, void *addr, uint size, int line, const char* file)
987 {
988 	bcm_mem_link_t *p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
989 	unsigned long flags = 0;
990 
991 	ASSERT(osh == NULL || osh->magic == OS_HANDLE_MAGIC);
992 
993 	if (p->size == 0) {
994 		printf("osl_debug_vmfree: double free on addr %p size %d at line %d file %s\n",
995 			addr, size, line, file);
996 		ASSERT(p->size);
997 		return;
998 	}
999 
1000 	if (p->size != size) {
1001 		printf("%s: dealloc size does not match alloc size\n", __FUNCTION__);
1002 		printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
1003 		printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
1004 		ASSERT(p->size == size);
1005 		return;
1006 	}
1007 
1008 	if (osh && ((osl_t*)p->osh)->cmn != osh->cmn) {
1009 		printf("osl_debug_vmfree: alloc osh %p does not match dealloc osh %p\n",
1010 			((osl_t*)p->osh)->cmn, osh->cmn);
1011 		printf("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
1012 		printf("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
1013 		ASSERT(((osl_t*)p->osh)->cmn == osh->cmn);
1014 		return;
1015 	}
1016 
1017 	/* unlink this block */
1018 	if (osh && osh->cmn) {
1019 		OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
1020 		if (p->prev)
1021 			p->prev->next = p->next;
1022 		if (p->next)
1023 			p->next->prev = p->prev;
1024 		if (osh->cmn->dbgvmem_list == p)
1025 			osh->cmn->dbgvmem_list = p->next;
1026 		p->next = p->prev = NULL;
1027 	}
1028 	p->size = 0;
1029 
1030 	if (osh && osh->cmn) {
1031 		OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
1032 	}
1033 	osl_vmfree(osh, p, size + sizeof(bcm_mem_link_t));
1034 }
1035 
1036 int
1037 osl_debug_memdump(osl_t *osh, struct bcmstrbuf *b)
1038 {
1039 	bcm_mem_link_t *p;
1040 	unsigned long flags = 0;
1041 
1042 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1043 
1044 	OSL_MEMLIST_LOCK(&osh->cmn->dbgmem_lock, flags);
1045 
1046 	if (osl_check_memleak(osh) && osh->cmn->dbgmem_list) {
1047 		if (b != NULL)
1048 			bcm_bprintf(b, "   Address   Size File:line\n");
1049 		else
1050 			printf("   Address   Size File:line\n");
1051 
1052 		for (p = osh->cmn->dbgmem_list; p; p = p->next) {
1053 			if (b != NULL)
1054 				bcm_bprintf(b, "%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
1055 					p->size, p->file, p->line);
1056 			else
1057 				printk("%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
1058 					p->size, p->file, p->line);
1059 
1060 			/* Detects loop-to-self so we don't enter infinite loop */
1061 			if (p == p->next) {
1062 				if (b != NULL)
1063 					bcm_bprintf(b, "WARNING: loop-to-self "
1064 						"p %p p->next %p\n", p, p->next);
1065 				else
1066 					printk("WARNING: loop-to-self "
1067 						"p %p p->next %p\n", p, p->next);
1068 
1069 				break;
1070 			}
1071 		}
1072 	}
1073 	if (osl_check_memleak(osh) && osh->cmn->dbgvmem_list) {
1074 		if (b != NULL)
1075 			bcm_bprintf(b, "Vmem\n   Address   Size File:line\n");
1076 		else
1077 			printf("Vmem\n   Address   Size File:line\n");
1078 
1079 		for (p = osh->cmn->dbgvmem_list; p; p = p->next) {
1080 			if (b != NULL)
1081 				bcm_bprintf(b, "%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
1082 					p->size, p->file, p->line);
1083 			else
1084 				printk("%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
1085 					p->size, p->file, p->line);
1086 
1087 			/* Detects loop-to-self so we don't enter infinite loop */
1088 			if (p == p->next) {
1089 				if (b != NULL)
1090 					bcm_bprintf(b, "WARNING: loop-to-self "
1091 						"p %p p->next %p\n", p, p->next);
1092 				else
1093 					printk("WARNING: loop-to-self "
1094 						"p %p p->next %p\n", p, p->next);
1095 
1096 				break;
1097 			}
1098 		}
1099 	}
1100 
1101 	OSL_MEMLIST_UNLOCK(&osh->cmn->dbgmem_lock, flags);
1102 
1103 	return 0;
1104 }
1105 
1106 #endif	/* BCMDBG_MEM */
1107 
1108 uint
1109 osl_dma_consistent_align(void)
1110 {
1111 	return (PAGE_SIZE);
1112 }
1113 
1114 void*
1115 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
1116 {
1117 	void *va;
1118 	uint16 align = (1 << align_bits);
1119 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1120 
1121 	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
1122 		size += align;
1123 	*alloced = size;
1124 
1125 #if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
1126 	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
1127 	if (va)
1128 		*pap = (ulong)__virt_to_phys((ulong)va);
1129 #else
1130 	{
1131 		dma_addr_t pap_lin;
1132 		struct pci_dev *hwdev = osh->pdev;
1133 		gfp_t flags;
1134 #ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
1135 		flags = GFP_ATOMIC;
1136 #else
1137 		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
1138 #endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
1139 #ifdef DHD_ALLOC_COHERENT_MEM_WITH_GFP_COMP
1140 		flags |= __GFP_COMP;
1141 #endif /* DHD_ALLOC_COHERENT_MEM_WITH_GFP_COMP */
1142 		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
1143 #ifdef BCMDMA64OSL
1144 		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
1145 		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
1146 #else
1147 		*pap = (dmaaddr_t)pap_lin;
1148 #endif /* BCMDMA64OSL */
1149 	}
1150 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
1151 
1152 	return va;
1153 }
1154 
1155 void
1156 osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
1157 {
1158 #ifdef BCMDMA64OSL
1159 	dma_addr_t paddr;
1160 #endif /* BCMDMA64OSL */
1161 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1162 
1163 #if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
1164 	kfree(va);
1165 #else
1166 #ifdef BCMDMA64OSL
1167 	PHYSADDRTOULONG(pa, paddr);
1168 	pci_free_consistent(osh->pdev, size, va, paddr);
1169 #else
1170 	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
1171 #endif /* BCMDMA64OSL */
1172 #endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
1173 }
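/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * A hypothetical ring allocation pairs these two helpers; the size reported
 * in 'alloced' (which may include alignment padding) is what must be passed
 * back to the free routine:
 *
 *	uint alloced = 0;
 *	dmaaddr_t pa;
 *	void *ring = osl_dma_alloc_consistent(osh, ring_sz, 4, &alloced, &pa);
 *	if (ring != NULL)
 *		osl_dma_free_consistent(osh, ring, alloced, pa);
 */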
1174 
1175 void *
1176 osl_virt_to_phys(void *va)
1177 {
1178 	return (void *)(uintptr)virt_to_phys(va);
1179 }
1180 
1181 #include <asm/cacheflush.h>
1182 void
1183 BCMFASTPATH(osl_dma_flush)(osl_t *osh, void *va, uint size, int direction, void *p,
1184 	hnddma_seg_map_t *dmah)
1185 {
1186 	return;
1187 }
1188 
1189 dmaaddr_t
1190 BCMFASTPATH(osl_dma_map)(osl_t *osh, void *va, uint size, int direction, void *p,
1191 	hnddma_seg_map_t *dmah)
1192 {
1193 	int dir;
1194 	dmaaddr_t ret_addr;
1195 	dma_addr_t map_addr;
1196 	int ret;
1197 
1198 	DMA_LOCK(osh);
1199 
1200 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1201 	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1202 
1203 	map_addr = pci_map_single(osh->pdev, va, size, dir);
1204 
1205 	ret = pci_dma_mapping_error(osh->pdev, map_addr);
1206 
1207 	if (ret) {
1208 		printf("%s: Failed to map memory\n", __FUNCTION__);
1209 		PHYSADDRLOSET(ret_addr, 0);
1210 		PHYSADDRHISET(ret_addr, 0);
1211 	} else {
1212 		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
1213 		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
1214 	}
1215 
1216 #ifdef DHD_MAP_LOGGING
1217 	osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
1218 #endif /* DHD_MAP_LOGGING */
1219 
1220 	DMA_UNLOCK(osh);
1221 
1222 	return ret_addr;
1223 }
1224 
1225 void
1226 BCMFASTPATH(osl_dma_unmap)(osl_t *osh, dmaaddr_t pa, uint size, int direction)
1227 {
1228 	int dir;
1229 #ifdef BCMDMA64OSL
1230 	dma_addr_t paddr;
1231 #endif /* BCMDMA64OSL */
1232 
1233 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1234 
1235 	DMA_LOCK(osh);
1236 
1237 	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1238 
1239 #ifdef DHD_MAP_LOGGING
1240 	osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
1241 #endif /* DHD_MAP_LOGGING */
1242 
1243 #ifdef BCMDMA64OSL
1244 	PHYSADDRTOULONG(pa, paddr);
1245 	pci_unmap_single(osh->pdev, paddr, size, dir);
1246 #else /* BCMDMA64OSL */
1247 	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
1248 #endif /* BCMDMA64OSL */
1249 
1250 	DMA_UNLOCK(osh);
1251 }
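/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * Streaming mappings are created before handing a buffer to the device and
 * torn down on completion; direction DMA_TX selects the to-device direction,
 * any other value maps to from-device. A hypothetical transmit path:
 *
 *	dmaaddr_t pa = osl_dma_map(osh, skb->data, skb->len, DMA_TX, NULL, NULL);
 *	...
 *	osl_dma_unmap(osh, pa, skb->len, DMA_TX);
 */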
1252 
1253 /* OSL function for CPU relax */
1254 inline void
1255 BCMFASTPATH(osl_cpu_relax)(void)
1256 {
1257 	cpu_relax();
1258 }
1259 
1260 extern void osl_preempt_disable(osl_t *osh)
1261 {
1262 	preempt_disable();
1263 }
1264 
1265 extern void osl_preempt_enable(osl_t *osh)
1266 {
1267 	preempt_enable();
1268 }
1269 
1270 #if defined(BCMDBG_ASSERT) || defined(BCMASSERT_LOG)
1271 void
1272 osl_assert(const char *exp, const char *file, int line)
1273 {
1274 	char tempbuf[256];
1275 	const char *basename;
1276 
1277 	basename = strrchr(file, '/');
1278 	/* skip the '/' */
1279 	if (basename)
1280 		basename++;
1281 
1282 	if (!basename)
1283 		basename = file;
1284 
1285 #ifdef BCMASSERT_LOG
1286 	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
1287 		exp, basename, line);
1288 #ifndef OEM_ANDROID
1289 	bcm_assert_log(tempbuf);
1290 #endif /* OEM_ANDROID */
1291 #endif /* BCMASSERT_LOG */
1292 
1293 #ifdef BCMDBG_ASSERT
1294 	snprintf(tempbuf, 256, "assertion \"%s\" failed: file \"%s\", line %d\n",
1295 		exp, basename, line);
1296 
1297 	/* Print assert message and give it time to be written to /var/log/messages */
1298 	if (!in_interrupt() && g_assert_type != 1 && g_assert_type != 3) {
1299 		const int delay = 3;
1300 		printf("%s", tempbuf);
1301 		printf("panic in %d seconds\n", delay);
1302 		set_current_state(TASK_INTERRUPTIBLE);
1303 		schedule_timeout(delay * HZ);
1304 	}
1305 #endif /* BCMDBG_ASSERT */
1306 
1307 	switch (g_assert_type) {
1308 	case 0:
1309 		printf("%s", tempbuf);
1310 		BUG();
1311 		break;
1312 	case 1:
1313 		/* fall through */
1314 	case 3:
1315 		printf("%s", tempbuf);
1316 		break;
1317 	case 2:
1318 		printf("%s", tempbuf);
1319 		BUG();
1320 		break;
1321 	default:
1322 		break;
1323 	}
1324 }
1325 #endif /* BCMDBG_ASSERT || BCMASSERT_LOG */
1326 
1327 void
1328 osl_delay(uint usec)
1329 {
1330 	uint d;
1331 
1332 #ifdef BCMSLTGT
1333 	usec *= htclkratio;
1334 #endif
1335 
1336 	while (usec > 0) {
1337 		d = MIN(usec, 1000);
1338 		udelay(d);
1339 		usec -= d;
1340 	}
1341 }
1342 
1343 void
1344 osl_sleep(uint ms)
1345 {
1346 #ifdef BCMSLTGT
1347 	ms *= htclkratio;
1348 #endif
1349 
1350 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1351 	if (ms < 20)
1352 		usleep_range(ms*1000, ms*1000 + 1000);
1353 	else
1354 #endif
1355 		msleep(ms);
1356 }
1357 
1358 uint64
1359 osl_sysuptime_us(void)
1360 {
1361 	struct osl_timespec tv;
1362 	uint64 usec;
1363 
1364 	osl_do_gettimeofday(&tv);
1365 	/* tv_usec content is fraction of a second */
1366 	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
1367 #ifdef BCMSLTGT
1368 	/* scale down the time to match the slow target roughly */
1369 	usec /= htclkratio;
1370 #endif
1371 	return usec;
1372 }
1373 
1374 uint64
1375 osl_localtime_ns(void)
1376 {
1377 	uint64 ts_nsec = 0;
1378 
1379 #ifdef BCMDONGLEHOST
1380 	/* Some Linux-based platforms cannot use local_clock()
1381 	 * since it is exported with EXPORT_SYMBOL_GPL(), and a
1382 	 * GPL-incompatible module (the NIC build, wl.ko)
1383 	 * cannot use a GPL-only symbol.
1384 	 */
1385 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1386 	ts_nsec = local_clock();
1387 #else
1388 	ts_nsec = cpu_clock(smp_processor_id());
1389 #endif
1390 #endif /* BCMDONGLEHOST */
1391 	return ts_nsec;
1392 }
1393 
1394 void
1395 osl_get_localtime(uint64 *sec, uint64 *usec)
1396 {
1397 	uint64 ts_nsec = 0;
1398 	unsigned long rem_nsec = 0;
1399 
1400 #ifdef BCMDONGLEHOST
1401 	/* Some Linux-based platforms cannot use local_clock()
1402 	 * since it is exported with EXPORT_SYMBOL_GPL(), and a
1403 	 * GPL-incompatible module (the NIC build, wl.ko)
1404 	 * cannot use a GPL-only symbol.
1405 	 */
1406 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1407 	ts_nsec = local_clock();
1408 #else
1409 	ts_nsec = cpu_clock(smp_processor_id());
1410 #endif
1411 	rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
1412 #endif /* BCMDONGLEHOST */
1413 	*sec = (uint64)ts_nsec;
1414 	*usec = (uint64)(rem_nsec / MSEC_PER_SEC);
1415 }
1416 
1417 uint64
1418 osl_systztime_us(void)
1419 {
1420 	struct osl_timespec tv;
1421 	uint64 tzusec;
1422 
1423 	osl_do_gettimeofday(&tv);
1424 	/* apply timezone */
1425 	tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
1426 		USEC_PER_SEC);
1427 	tzusec += tv.tv_usec;
1428 
1429 	return tzusec;
1430 }
1431 
1432 /*
1433  * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1434  */
1435 #ifdef OSLREGOPS
1436 uint8
1437 osl_readb(osl_t *osh, volatile uint8 *r)
1438 {
1439 	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1440 	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1441 
1442 	return (uint8)((rreg)(ctx, (volatile void*)r, sizeof(uint8)));
1443 }
1444 
1445 uint16
1446 osl_readw(osl_t *osh, volatile uint16 *r)
1447 {
1448 	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1449 	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1450 
1451 	return (uint16)((rreg)(ctx, (volatile void*)r, sizeof(uint16)));
1452 }
1453 
1454 uint32
1455 osl_readl(osl_t *osh, volatile uint32 *r)
1456 {
1457 	osl_rreg_fn_t rreg	= ((osl_pubinfo_t*)osh)->rreg_fn;
1458 	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1459 
1460 	return (uint32)((rreg)(ctx, (volatile void*)r, sizeof(uint32)));
1461 }
1462 
1463 void
1464 osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
1465 {
1466 	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1467 	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1468 
1469 	((wreg)(ctx, (volatile void*)r, v, sizeof(uint8)));
1470 }
1471 
1472 void
1473 osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
1474 {
1475 	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1476 	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1477 
1478 	((wreg)(ctx, (volatile void*)r, v, sizeof(uint16)));
1479 }
1480 
1481 void
1482 osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
1483 {
1484 	osl_wreg_fn_t wreg	= ((osl_pubinfo_t*)osh)->wreg_fn;
1485 	void *ctx		= ((osl_pubinfo_t*)osh)->reg_ctx;
1486 
1487 	((wreg)(ctx, (volatile void*)r, v, sizeof(uint32)));
1488 }
1489 #endif /* OSLREGOPS */
1490 
1491 /*
1492  * BINOSL selects the slightly slower function-call-based binary compatible osl.
1493  */
1494 #ifdef BINOSL
1495 
1496 uint32
1497 osl_sysuptime(void)
1498 {
1499 	uint32 msec = ((uint32)jiffies * (1000 / HZ));
1500 #ifdef BCMSLTGT
1501 	/* scale down the time to match the slow target roughly */
1502 	msec /= htclkratio;
1503 #endif
1504 	return msec;
1505 }
1506 
1507 int
1508 osl_printf(const char *format, ...)
1509 {
1510 	va_list args;
1511 	static char printbuf[1024];
1512 	int len;
1513 
1514 	/* sprintf into a local buffer because there *is* no "vprintk()".. */
1515 	va_start(args, format);
1516 	len = vsnprintf(printbuf, 1024, format, args);
1517 	va_end(args);
1518 
1519 	if (len > sizeof(printbuf)) {
1520 		printf("osl_printf: buffer overrun\n");
1521 		return (0);
1522 	}
1523 
1524 	return (printf("%s", printbuf));
1525 }
1526 
1527 int
1528 osl_sprintf(char *buf, const char *format, ...)
1529 {
1530 	va_list args;
1531 	int rc;
1532 
1533 	va_start(args, format);
1534 	rc = vsprintf(buf, format, args);
1535 	va_end(args);
1536 	return (rc);
1537 }
1538 
1539 int
1540 osl_snprintf(char *buf, size_t n, const char *format, ...)
1541 {
1542 	va_list args;
1543 	int rc;
1544 
1545 	va_start(args, format);
1546 	rc = vsnprintf(buf, n, format, args);
1547 	va_end(args);
1548 	return (rc);
1549 }
1550 
1551 int
1552 osl_vsprintf(char *buf, const char *format, va_list ap)
1553 {
1554 	return (vsprintf(buf, format, ap));
1555 }
1556 
1557 int
1558 osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
1559 {
1560 	return (vsnprintf(buf, n, format, ap));
1561 }
1562 
1563 int
1564 osl_strcmp(const char *s1, const char *s2)
1565 {
1566 	return (strcmp(s1, s2));
1567 }
1568 
1569 int
1570 osl_strncmp(const char *s1, const char *s2, uint n)
1571 {
1572 	return (strncmp(s1, s2, n));
1573 }
1574 
1575 int
1576 osl_strlen(const char *s)
1577 {
1578 	return (strlen(s));
1579 }
1580 
1581 char*
1582 osl_strcpy(char *d, const char *s)
1583 {
1584 	return (strcpy(d, s));
1585 }
1586 
1587 char*
1588 osl_strncpy(char *d, const char *s, uint n)
1589 {
1590 	return (strlcpy(d, s, n));
1591 }
1592 
1593 char*
1594 osl_strchr(const char *s, int c)
1595 {
1596 	return (strchr(s, c));
1597 }
1598 
1599 char*
1600 osl_strrchr(const char *s, int c)
1601 {
1602 	return (strrchr(s, c));
1603 }
1604 
1605 void*
1606 osl_memset(void *d, int c, size_t n)
1607 {
1608 	return memset(d, c, n);
1609 }
1610 
1611 void*
1612 osl_memcpy(void *d, const void *s, size_t n)
1613 {
1614 	return memcpy(d, s, n);
1615 }
1616 
1617 void*
1618 osl_memmove(void *d, const void *s, size_t n)
1619 {
1620 	return memmove(d, s, n);
1621 }
1622 
1623 int
1624 osl_memcmp(const void *s1, const void *s2, size_t n)
1625 {
1626 	return memcmp(s1, s2, n);
1627 }
1628 
1629 uint32
1630 osl_readl(volatile uint32 *r)
1631 {
1632 	return (readl(r));
1633 }
1634 
1635 uint16
1636 osl_readw(volatile uint16 *r)
1637 {
1638 	return (readw(r));
1639 }
1640 
1641 uint8
1642 osl_readb(volatile uint8 *r)
1643 {
1644 	return (readb(r));
1645 }
1646 
1647 void
1648 osl_writel(uint32 v, volatile uint32 *r)
1649 {
1650 	writel(v, r);
1651 }
1652 
1653 void
1654 osl_writew(uint16 v, volatile uint16 *r)
1655 {
1656 	writew(v, r);
1657 }
1658 
1659 void
1660 osl_writeb(uint8 v, volatile uint8 *r)
1661 {
1662 	writeb(v, r);
1663 }
1664 
1665 void *
1666 osl_uncached(void *va)
1667 {
1668 	return ((void*)va);
1669 }
1670 
1671 void *
1672 osl_cached(void *va)
1673 {
1674 	return ((void*)va);
1675 }
1676 
1677 uint
1678 osl_getcycles(void)
1679 {
1680 	uint cycles;
1681 
1682 #if defined(__i386__)
1683 	rdtscl(cycles);
1684 #else
1685 	cycles = 0;
1686 #endif /* __i386__ */
1687 	return cycles;
1688 }
1689 
1690 void *
1691 osl_reg_map(uint32 pa, uint size)
1692 {
1693 	return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
1694 }
1695 
1696 void
1697 osl_reg_unmap(void *va)
1698 {
1699 	iounmap(va);
1700 }
1701 
1702 int
1703 osl_busprobe(uint32 *val, uint32 addr)
1704 {
1705 	*val = readl((uint32 *)(uintptr)addr);
1706 
1707 	return 0;
1708 }
1709 #endif	/* BINOSL */
1710 
1711 uint32
1712 osl_rand(void)
1713 {
1714 	uint32 rand;
1715 
1716 	get_random_bytes(&rand, sizeof(rand));
1717 
1718 	return rand;
1719 }
1720 
1721 /* Linux Kernel: File Operations: start */
1722 void *
1723 osl_os_open_image(char *filename)
1724 {
1725 	struct file *fp;
1726 
1727 	fp = filp_open(filename, O_RDONLY, 0);
1728 	/*
1729 	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1730 	 * Alternative:
1731 	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1732 	 * ???
1733 	 */
1734 	if (IS_ERR(fp)) {
1735 		printf("ERROR %ld: Unable to open file %s\n", PTR_ERR(fp), filename);
1736 		fp = NULL;
1737 	}
1738 
1739 	return fp;
1740 }
1741 
1742 int
1743 osl_os_get_image_block(char *buf, int len, void *image)
1744 {
1745 	struct file *fp = (struct file *)image;
1746 	int rdlen;
1747 
1748 	if (fp == NULL) {
1749 		return 0;
1750 	}
1751 
1752 	rdlen = kernel_read_compat(fp, fp->f_pos, buf, len);
1753 	if (rdlen > 0) {
1754 		fp->f_pos += rdlen;
1755 	}
1756 
1757 	return rdlen;
1758 }
1759 
1760 void
1761 osl_os_close_image(void *image)
1762 {
1763 	struct file *fp = (struct file *)image;
1764 
1765 	if (fp != NULL) {
1766 		filp_close(fp, NULL);
1767 	}
1768 }
1769 
1770 int
1771 osl_os_image_size(void *image)
1772 {
1773 	int len = 0, curroffset;
1774 
1775 	if (image) {
1776 		/* store the current offset */
1777 		curroffset = generic_file_llseek(image, 0, 1);
1778 		/* goto end of file to get length */
1779 		len = generic_file_llseek(image, 0, 2);
1780 		/* restore back the offset */
1781 		generic_file_llseek(image, curroffset, 0);
1782 	}
1783 	return len;
1784 }
1785 
1786 /* Linux Kernel: File Operations: end */
1787 
1788 #if defined(AXI_TIMEOUTS_NIC)
1789 inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
1790 {
1791 	bool poll_timeout = FALSE;
1792 	static int in_si_clear = FALSE;
1793 
1794 	switch (size) {
1795 	case sizeof(uint8):
1796 		*(volatile uint8*)v = readb((volatile uint8*)(addr));
1797 		if (*(volatile uint8*)v == 0xff)
1798 			poll_timeout = TRUE;
1799 		break;
1800 	case sizeof(uint16):
1801 		*(volatile uint16*)v = readw((volatile uint16*)(addr));
1802 		if (*(volatile uint16*)v == 0xffff)
1803 			poll_timeout = TRUE;
1804 		break;
1805 	case sizeof(uint32):
1806 		*(volatile uint32*)v = readl((volatile uint32*)(addr));
1807 		if (*(volatile uint32*)v == 0xffffffff)
1808 			poll_timeout = TRUE;
1809 		break;
1810 	case sizeof(uint64):
1811 		*(volatile uint64*)v = *((volatile uint64*)(addr));
1812 		if (*(volatile uint64*)v == 0xffffffffffffffff)
1813 			poll_timeout = TRUE;
1814 		break;
1815 	}
1816 
1817 	if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
1818 		in_si_clear = TRUE;
1819 		osh->bpt_cb((void *)osh->sih, (void *)addr);
1820 		in_si_clear = FALSE;
1821 	}
1822 }
1823 #endif /* AXI_TIMEOUTS_NIC */
1824 
1825 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
1826 void
1827 timer_cb_compat(struct timer_list *tl)
1828 {
1829 	timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
1830 	t->callback((ulong)t->arg);
1831 }
1832 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
1833 
1834 /* timer apis */
1835 /* Note: all timer APIs are not thread-safe and must be protected with locks by the caller */
1836 
1837 osl_timer_t *
1838 osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
1839 {
1840 	osl_timer_t *t;
1841 	BCM_REFERENCE(fn);
1842 	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
1843 		printf(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
1844 			(int)sizeof(osl_timer_t));
1845 		return (NULL);
1846 	}
1847 	bzero(t, sizeof(osl_timer_t));
1848 	if ((t->timer = MALLOCZ(NULL, sizeof(timer_list_compat_t))) == NULL) {
1849 		printf("osl_timer_init: malloc failed\n");
1850 		MFREE(NULL, t, sizeof(osl_timer_t));
1851 		return (NULL);
1852 	}
1853 
1854 	t->set = TRUE;
1855 #ifdef BCMDBG
1856 	if ((t->name = MALLOCZ(NULL, strlen(name) + 1)) != NULL) {
1857 		strcpy(t->name, name);
1858 	}
1859 #endif
1860 
1861 	init_timer_compat(t->timer, (linux_timer_fn)fn, arg);
1862 
1863 	return (t);
1864 }
1865 
1866 void
1867 osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1868 {
1869 	if (t == NULL) {
1870 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1871 		return;
1872 	}
1873 	ASSERT(!t->set);
1874 
1875 	t->set = TRUE;
1876 	if (periodic) {
1877 		printf("Periodic timers are not supported by Linux timer apis\n");
1878 	}
1879 #if defined(BCMSLTGT)
1880 	timer_expires(t->timer) = jiffies + ms*HZ/1000*htclkratio;
1881 #else
1882 	timer_expires(t->timer) = jiffies + ms*HZ/1000;
1883 #endif /* defined(BCMSLTGT) */
1884 
1885 	add_timer(t->timer);
1886 
1887 	return;
1888 }
1889 
1890 void
1891 osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1892 {
1893 	if (t == NULL) {
1894 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1895 		return;
1896 	}
1897 	if (periodic) {
1898 		printf("Periodic timers are not supported by Linux timer apis\n");
1899 	}
1900 	t->set = TRUE;
1901 #if defined(BCMSLTGT)
1902 	timer_expires(t->timer) = jiffies + ms*HZ/1000*htclkratio;
1903 #else
1904 	timer_expires(t->timer) = jiffies + ms*HZ/1000;
1905 #endif /* defined(BCMSLTGT) */
1906 
1907 	mod_timer(t->timer, timer_expires(t->timer));
1908 
1909 	return;
1910 }
1911 
1912 /*
1913  * Return TRUE if timer successfully deleted, FALSE if still pending
1914  */
1915 bool
1916 osl_timer_del(osl_t *osh, osl_timer_t *t)
1917 {
1918 	if (t == NULL) {
1919 		printf("%s: Timer handle is NULL\n", __FUNCTION__);
1920 		return (FALSE);
1921 	}
1922 	if (t->set) {
1923 		t->set = FALSE;
1924 		if (t->timer) {
1925 			del_timer(t->timer);
1926 			MFREE(NULL, t->timer, sizeof(struct timer_list));
1927 		}
1928 #ifdef BCMDBG
1929 		if (t->name) {
1930 			MFREE(NULL, t->name, strlen(t->name) + 1);
1931 		}
1932 #endif
1933 		MFREE(NULL, t, sizeof(osl_timer_t));
1934 	}
1935 	return (TRUE);
1936 }
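/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The timer helpers wrap one-shot Linux timers (periodic mode is not
 * supported) and are not thread-safe. A hypothetical caller:
 *
 *	osl_timer_t *t = osl_timer_init(osh, "watchdog", my_cb, ctx);
 *	if (t != NULL) {
 *		osl_timer_update(osh, t, 100, FALSE);	// (re)arm for ~100 ms
 *		...
 *		osl_timer_del(osh, t);			// also frees t
 *	}
 */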
1937 
1938 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1939 int
1940 kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
1941 {
1942 	if (!IS_ENABLED(CONFIG_NO_GKI))
1943 		return -EPERM;
1944 	return (int)kernel_read(file, addr, (size_t)count, &offset);
1945 }
1946 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
1947 
1948 /* Linux specific multipurpose spinlock API */
1949 void *
1950 osl_spin_lock_init(osl_t *osh)
1951 {
1952 	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
1953 	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
1954 	/* and this results in kernel asserts in internal builds */
1955 	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
1956 	if (lock)
1957 		spin_lock_init(lock);
1958 	return ((void *)lock);
1959 }
1960 void
1961 osl_spin_lock_deinit(osl_t *osh, void *lock)
1962 {
1963 	if (lock)
1964 		MFREE(osh, lock, sizeof(spinlock_t) + 4);
1965 }
1966 
1967 unsigned long
1968 osl_spin_lock(void *lock)
1969 {
1970 	unsigned long flags = 0;
1971 
1972 	if (lock) {
1973 #ifdef DHD_USE_SPIN_LOCK_BH
1974 		/* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
1975 		ASSERT(!in_irq());
1976 		spin_lock_bh((spinlock_t *)lock);
1977 #else
1978 		spin_lock_irqsave((spinlock_t *)lock, flags);
1979 #endif /* DHD_USE_SPIN_LOCK_BH */
1980 	}
1981 
1982 	return flags;
1983 }
1984 
1985 void
1986 osl_spin_unlock(void *lock, unsigned long flags)
1987 {
1988 	if (lock) {
1989 #ifdef DHD_USE_SPIN_LOCK_BH
1990 		/* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
1991 		ASSERT(!in_irq());
1992 		spin_unlock_bh((spinlock_t *)lock);
1993 #else
1994 		spin_unlock_irqrestore((spinlock_t *)lock, flags);
1995 #endif /* DHD_USE_SPIN_LOCK_BH */
1996 	}
1997 }
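/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The multipurpose spinlock API hands out an opaque void * handle and
 * returns the saved IRQ flags from the lock call, which the caller passes
 * back on unlock:
 *
 *	void *lk = osl_spin_lock_init(osh);
 *	unsigned long flags = osl_spin_lock(lk);
 *	... critical section ...
 *	osl_spin_unlock(lk, flags);
 *	osl_spin_lock_deinit(osh, lk);
 */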
1998 
1999 unsigned long
2000 osl_spin_lock_irq(void *lock)
2001 {
2002 	unsigned long flags = 0;
2003 
2004 	if (lock)
2005 		spin_lock_irqsave((spinlock_t *)lock, flags);
2006 
2007 	return flags;
2008 }
2009 
2010 void
2011 osl_spin_unlock_irq(void *lock, unsigned long flags)
2012 {
2013 	if (lock)
2014 		spin_unlock_irqrestore((spinlock_t *)lock, flags);
2015 }
2016 
2017 unsigned long
2018 osl_spin_lock_bh(void *lock)
2019 {
2020 	unsigned long flags = 0;
2021 
2022 	if (lock) {
2023 		/* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
2024 		ASSERT(!in_irq());
2025 		spin_lock_bh((spinlock_t *)lock);
2026 	}
2027 
2028 	return flags;
2029 }
2030 
2031 void
2032 osl_spin_unlock_bh(void *lock, unsigned long flags)
2033 {
2034 	if (lock) {
2035 		/* Calling spin_lock_bh with both irq and non-irq context will lead to deadlock */
2036 		ASSERT(!in_irq());
2037 		spin_unlock_bh((spinlock_t *)lock);
2038 	}
2039 }
2040 
2041 void *
2042 osl_mutex_lock_init(osl_t *osh)
2043 {
2044 	struct mutex *mtx = NULL;
2045 
2046 	mtx = MALLOCZ(osh, sizeof(*mtx));
2047 	if (mtx)
2048 		mutex_init(mtx);
2049 
2050 	return mtx;
2051 }
2052 
2053 void
2054 osl_mutex_lock_deinit(osl_t *osh, void *mutex)
2055 {
2056 	struct mutex *mtx = mutex;
2057 
2058 	if (mtx) {
2059 		mutex_destroy(mtx);
2060 		MFREE(osh, mtx, sizeof(struct mutex));
2061 	}
2062 }
2063 
2064 /* For mutex lock/unlock unsigned long flags is used,
2065  * this is to keep in sync with spin lock apis, so that
2066  * locks can be easily interchanged based on contexts
2067  */
2068 unsigned long
2069 osl_mutex_lock(void *lock)
2070 {
2071 	if (lock)
2072 		mutex_lock((struct mutex *)lock);
2073 
2074 	return 0;
2075 }
2076 
2077 void
2078 osl_mutex_unlock(void *lock, unsigned long flags)
2079 {
2080 	if (lock)
2081 		mutex_unlock((struct mutex *)lock);
2082 	return;
2083 }
2084 
2085 #ifdef USE_DMA_LOCK
2086 static void
2087 osl_dma_lock(osl_t *osh)
2088 {
2089 	/* The conditional check avoids a scheduling bug:
2090 	 * if spin_lock_bh() is taken while spin_lock_irqsave() is held,
2091 	 * the kernel emits a warning because spin_lock_irqsave()
2092 	 * disables interrupts and spin_lock_bh() must not be used
2093 	 * while interrupts are disabled.
2094 	 * Please refer to the __local_bh_enable_ip() function
2095 	 * in kernel/softirq.c to understand the condition.
2096 	 */
2097 	if (likely(in_irq() || irqs_disabled())) {
2098 		spin_lock(&osh->dma_lock);
2099 	} else {
2100 		spin_lock_bh(&osh->dma_lock);
2101 		osh->dma_lock_bh = TRUE;
2102 	}
2103 }
2104 
2105 static void
2106 osl_dma_unlock(osl_t *osh)
2107 {
2108 	if (unlikely(osh->dma_lock_bh)) {
2109 		osh->dma_lock_bh = FALSE;
2110 		spin_unlock_bh(&osh->dma_lock);
2111 	} else {
2112 		spin_unlock(&osh->dma_lock);
2113 	}
2114 }
2115 
2116 static void
2117 osl_dma_lock_init(osl_t *osh)
2118 {
2119 	spin_lock_init(&osh->dma_lock);
2120 	osh->dma_lock_bh = FALSE;
2121 }
2122 #endif /* USE_DMA_LOCK */
2123 
2124 void
2125 osl_do_gettimeofday(struct osl_timespec *ts)
2126 {
2127 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2128 	struct timespec64 curtime;
2129 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
2130 	struct timespec curtime;
2131 #else
2132 	struct timeval curtime;
2133 #endif
2134 
2135 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2136 	ktime_get_real_ts64(&curtime);
2137 	ts->tv_nsec = curtime.tv_nsec;
2138 	ts->tv_usec	= curtime.tv_nsec / 1000;
2139 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
2140 	getnstimeofday(&curtime);
2141 	ts->tv_nsec = curtime.tv_nsec;
2142 	ts->tv_usec = curtime.tv_nsec / 1000;
2143 #else
2144 	do_gettimeofday(&curtime);
2145 	ts->tv_usec = curtime.tv_usec;
2146 	ts->tv_nsec = curtime.tv_usec * 1000;
2147 #endif
2148 	ts->tv_sec = curtime.tv_sec;
2149 }
2150 
2151 uint32
2152 osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts)
2153 {
2154 	uint32 diff_s, diff_us, total_diff_us;
2155 	bool pgc_g = FALSE;
2156 
2157 	diff_s = (uint32)cur_ts->tv_sec - (uint32)old_ts->tv_sec;
2158 	pgc_g = (cur_ts->tv_usec > old_ts->tv_usec) ? TRUE : FALSE;
2159 	diff_us = pgc_g ? (cur_ts->tv_usec - old_ts->tv_usec) : (old_ts->tv_usec - cur_ts->tv_usec);
2160 	total_diff_us = pgc_g ? (diff_s * 1000000 + diff_us) : (diff_s * 1000000 - diff_us);
2161 	return total_diff_us;
2162 }
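/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * Measuring an elapsed interval in microseconds with the osl timespec
 * helpers; variable names are hypothetical:
 *
 *	struct osl_timespec start, end;
 *	osl_do_gettimeofday(&start);
 *	... work ...
 *	osl_do_gettimeofday(&end);
 *	uint32 elapsed_us = osl_do_gettimediff(&end, &start);
 */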
2163 
2164 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
2165 void
2166 osl_get_monotonic_boottime(struct osl_timespec *ts)
2167 {
2168 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2169 	struct timespec64 curtime;
2170 #else
2171 	struct timespec curtime;
2172 #endif
2173 
2174 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2175 	curtime = ktime_to_timespec64(ktime_get_boottime());
2176 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2177 	curtime = ktime_to_timespec(ktime_get_boottime());
2178 #else
2179 	get_monotonic_boottime(&curtime);
2180 #endif
2181 	ts->tv_sec = curtime.tv_sec;
2182 	ts->tv_nsec = curtime.tv_nsec;
2183 	ts->tv_usec = curtime.tv_nsec / 1000;
2184 }
2185 #endif
2186