xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd_indep_power/include/linux_osl.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Linux OS Independent Layer
4  *
5  * Copyright (C) 1999-2017, Broadcom Corporation
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: linux_osl.h 672413 2016-11-28 11:13:23Z $
29  */
30 
31 #ifndef _linux_osl_h_
32 #define _linux_osl_h_
33 
34 #include <typedefs.h>
/* Force a minimum byte alignment on a declaration (GCC/Clang attribute form). */
#define DECLSPEC_ALIGN(x)	__attribute__ ((aligned(x)))

/* Linux Kernel: File Operations: start */
/* Open an image file by name; returns an opaque handle, or NULL on failure. */
extern void * osl_os_open_image(char * filename);
/* Read up to 'len' bytes from the opened image into 'buf'; returns count read. */
extern int osl_os_get_image_block(char * buf, int len, void * image);
/* Release a handle obtained from osl_os_open_image(). */
extern void osl_os_close_image(void * image);
/* Total size in bytes of the opened image. */
extern int osl_os_image_size(void *image);
/* Linux Kernel: File Operations: end */
43 
44 #ifdef BCMDRIVER
45 
46 /* OSL initialization */
47 #ifdef SHARED_OSL_CMN
48 extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
49 #else
50 extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
51 #endif /* SHARED_OSL_CMN */
52 
53 extern void osl_detach(osl_t *osh);
54 extern int osl_static_mem_init(osl_t *osh, void *adapter);
55 extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
56 extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
57 extern void* osl_get_bus_handle(osl_t *osh);
58 
59 /* Global ASSERT type */
60 extern uint32 g_assert_type;
61 
/* printf() format specifiers for physical addresses: a 64-bit phys_addr_t
 * needs the "ll" length modifier, a 32-bit one does not.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define PRI_FMT_x       "llx"
#define PRI_FMT_X       "llX"
#define PRI_FMT_o       "llo"
#define PRI_FMT_d       "lld"
#else
#define PRI_FMT_x       "x"
#define PRI_FMT_X       "X"
#define PRI_FMT_o       "o"
#define PRI_FMT_d       "d"
#endif /* CONFIG_PHYS_ADDR_T_64BIT */
/* ASSERT: logs via osl_assert() when BCMASSERT_LOG is on, otherwise a no-op. */
#if defined(BCMASSERT_LOG)
	#define ASSERT(exp) \
	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
extern void osl_assert(const char *exp, const char *file, int line);
#else
	#ifdef __GNUC__
		#define GCC_VERSION \
			(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
		#if GCC_VERSION > 30100
			#define ASSERT(exp)	do {} while (0)
		#else
			/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
			#define ASSERT(exp)
		#endif /* GCC_VERSION > 30100 */
	#else
		/* Fallback so ASSERT() is always defined, even for non-GNU compilers */
		#define ASSERT(exp)	do {} while (0)
	#endif /* __GNUC__ */
#endif
90 
91 /* bcm_prefetch_32B */
bcm_prefetch_32B(const uint8 * addr,const int cachelines_32B)92 static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
93 {
94 #if (defined(STB) && defined(__arm__)) && (__LINUX_ARM_ARCH__ >= 5)
95 	switch (cachelines_32B) {
96 		case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc");
97 		case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc");
98 		case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
99 		case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr +  0) : "cc");
100 	}
101 #endif
102 }
103 
104 /* microsecond delay */
105 #define	OSL_DELAY(usec)		osl_delay(usec)
106 extern void osl_delay(uint usec);
107 
108 #define OSL_SLEEP(ms)			osl_sleep(ms)
109 extern void osl_sleep(uint ms);
110 
111 #define	OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
112 	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
113 #define	OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
114 	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
115 extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
116 extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
117 
118 /* PCI configuration space access macros */
119 #define	OSL_PCI_READ_CONFIG(osh, offset, size) \
120 	osl_pci_read_config((osh), (offset), (size))
121 #define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
122 	osl_pci_write_config((osh), (offset), (size), (val))
123 extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
124 extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
125 
126 /* PCI device bus # and slot # */
127 #define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
128 #define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
129 #define OSL_PCIE_DOMAIN(osh)	osl_pcie_domain(osh)
130 #define OSL_PCIE_BUS(osh)	osl_pcie_bus(osh)
131 extern uint osl_pci_bus(osl_t *osh);
132 extern uint osl_pci_slot(osl_t *osh);
133 extern uint osl_pcie_domain(osl_t *osh);
134 extern uint osl_pcie_bus(osl_t *osh);
135 extern struct pci_dev *osl_pci_device(osl_t *osh);
136 
137 #define OSL_ACP_COHERENCE		(1<<1L)
138 #define OSL_FWDERBUF			(1<<2L)
139 
140 /* Pkttag flag should be part of public information */
141 typedef struct {
142 	bool pkttag;
143 	bool mmbus;		/**< Bus supports memory-mapped register accesses */
144 	pktfree_cb_fn_t tx_fn;  /**< Callback function for PKTFREE */
145 	void *tx_ctx;		/**< Context to the callback function */
146 	void	*unused[3];
147 } osl_pubinfo_t;
148 
149 extern void osl_flag_set(osl_t *osh, uint32 mask);
150 extern void osl_flag_clr(osl_t *osh, uint32 mask);
151 extern bool osl_is_flag_set(osl_t *osh, uint32 mask);
152 
153 #define PKTFREESETCB(osh, _tx_fn, _tx_ctx)		\
154 	do {						\
155 	   ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn;	\
156 	   ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx;	\
157 	} while (0)
158 
159 
160 /* host/bus architecture-specific byte swap */
161 #define BUS_SWAP32(v)		(v)
162 	#define MALLOC(osh, size)	osl_malloc((osh), (size))
163 	#define MALLOCZ(osh, size)	osl_mallocz((osh), (size))
164 	#define MFREE(osh, addr, size)	osl_mfree((osh), (addr), (size))
165 	#define VMALLOC(osh, size)	osl_vmalloc((osh), (size))
166 	#define VMALLOCZ(osh, size)	osl_vmallocz((osh), (size))
167 	#define VMFREE(osh, addr, size)	osl_vmfree((osh), (addr), (size))
168 	#define MALLOCED(osh)		osl_malloced((osh))
169 	#define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
170 	extern void *osl_malloc(osl_t *osh, uint size);
171 	extern void *osl_mallocz(osl_t *osh, uint size);
172 	extern void osl_mfree(osl_t *osh, void *addr, uint size);
173 	extern void *osl_vmalloc(osl_t *osh, uint size);
174 	extern void *osl_vmallocz(osl_t *osh, uint size);
175 	extern void osl_vmfree(osl_t *osh, void *addr, uint size);
176 	extern uint osl_malloced(osl_t *osh);
177 	extern uint osl_check_memleak(osl_t *osh);
178 
179 #define	MALLOC_FAILED(osh)	osl_malloc_failed((osh))
180 extern uint osl_malloc_failed(osl_t *osh);
181 
182 /* allocate/free shared (dma-able) consistent memory */
183 #define	DMA_CONSISTENT_ALIGN	osl_dma_consistent_align()
184 #define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
185 	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
186 #define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
187 	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
188 
189 #define	DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
190 	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
191 #define	DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
192 	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
193 
194 extern uint osl_dma_consistent_align(void);
195 extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
196 	uint *tot, dmaaddr_t *pap);
197 extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
198 
199 /* map/unmap direction */
200 #define DMA_NO	0	/* Used to skip cache op */
201 #define	DMA_TX	1	/* TX direction for DMA */
202 #define	DMA_RX	2	/* RX direction for DMA */
203 
204 /* map/unmap shared (dma-able) memory */
205 #define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
206 	osl_dma_unmap((osh), (pa), (size), (direction))
207 extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
208 	hnddma_seg_map_t *txp_dmah);
209 extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction);
210 
211 /* API for DMA addressing capability */
212 #define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
213 
214 #define OSL_SMP_WMB()	smp_wmb()
215 
216 /* API for CPU relax */
217 extern void osl_cpu_relax(void);
218 #define OSL_CPU_RELAX() osl_cpu_relax()
219 
220 extern void osl_preempt_disable(osl_t *osh);
221 extern void osl_preempt_enable(osl_t *osh);
222 #define OSL_DISABLE_PREEMPTION(osh)	osl_preempt_disable(osh)
223 #define OSL_ENABLE_PREEMPTION(osh)	osl_preempt_enable(osh)
224 
/* Explicit cache maintenance, compiled in only for ARMv7 builds that do not
 * use coherent memory for rings (or STB Linux); no-ops otherwise.
 */
#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \
	(defined(STBLINUX) && defined(__ARM_ARCH_7A__))
	extern void osl_cache_flush(void *va, uint size);
	extern void osl_cache_inv(void *va, uint size);
	extern void osl_prefetch(const void *ptr);
	#define OSL_CACHE_FLUSH(va, len)	osl_cache_flush((void *)(va), len)
	#define OSL_CACHE_INV(va, len)		osl_cache_inv((void *)(va), len)
	#define OSL_PREFETCH(ptr)			osl_prefetch(ptr)
#if defined(__ARM_ARCH_7A__)
	extern int osl_arch_is_coherent(void);
	#define OSL_ARCH_IS_COHERENT()		osl_arch_is_coherent()
	extern int osl_acp_war_enab(void);
	#define OSL_ACP_WAR_ENAB()			osl_acp_war_enab()
#else  /* !__ARM_ARCH_7A__ */
	/* NOTE(review): NULL used here as a 0-valued predicate result */
	#define OSL_ARCH_IS_COHERENT()		NULL
	#define OSL_ACP_WAR_ENAB()			NULL
#endif /* !__ARM_ARCH_7A__ */
#else  /* !__mips__ && !__ARM_ARCH_7A__ */
	#define OSL_CACHE_FLUSH(va, len)	BCM_REFERENCE(va)
	#define OSL_CACHE_INV(va, len)		BCM_REFERENCE(va)
	#define OSL_PREFETCH(ptr)		BCM_REFERENCE(ptr)

	#define OSL_ARCH_IS_COHERENT()		NULL
	#define OSL_ACP_WAR_ENAB()			NULL
#endif
250 
/* register access macros */
#if defined(BCMSDIO)
	#include <bcmsdh.h>
	#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
		(uintptr)(r), sizeof(*(r)), (v)))
	#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
		(uintptr)(r), sizeof(*(r))))
#elif (defined(STB) && defined(__arm__))
extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);

#define OSL_READ_REG(osh, r) \
	({\
		__typeof(*(r)) __osl_v; \
		osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
		__osl_v; \
	})
#endif

/* Select memory-mapped vs. bus-indirect register access.
 * NOTE(review): the BCMSDIO SELECT_BUS_WRITE expands to a bare if/else, so
 * it is unsafe inside an unbraced if/else at the call site; its shape is
 * relied on by existing callers (mmap_op is a statement, bus_op an
 * expression), so it is deliberately left unchanged here.
 */
#if (defined(STB) && defined(__arm__))
	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
#else /* !BCM47XX_CA9 */
#if defined(BCMSDIO)
	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
		mmap_op else bus_op
	#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
		mmap_op : bus_op
#else
	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#endif
#endif
283 
284 #define OSL_ERROR(bcmerror)	osl_error(bcmerror)
285 extern int osl_error(int bcmerror);
286 
287 /* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
288 #define	PKTBUFSZ	2048   /* largest reasonable packet buffer, driver uses for ethernet MTU */
289 
290 #define OSH_NULL   NULL
291 
292 /*
293  * BINOSL selects the slightly slower function-call-based binary compatible osl.
294  * Macros expand to calls to functions defined in linux_osl.c .
295  */
296 #include <linuxver.h>           /* use current 2.4.x calling conventions */
297 #include <linux/kernel.h>       /* for vsn/printf's */
298 #include <linux/string.h>       /* for mem*, str* */
299 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
300 extern uint64 osl_sysuptime_us(void);
301 #define OSL_SYSUPTIME()		((uint32)jiffies_to_msecs(jiffies))
302 #define OSL_SYSUPTIME_US()	osl_sysuptime_us()
303 #else
304 #define OSL_SYSUPTIME()		((uint32)jiffies * (1000 / HZ))
305 #error "OSL_SYSUPTIME_US() may need to be defined"
306 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
307 #define	printf(fmt, args...)	printk("[dhd] "fmt , ## args)
308 #define CONFIG_CHIPALIVE_LEVEL	(1 << 15)
309 #define DHD_PRINT(x, args...) \
310 	do { \
311 		if (config_msg_level & CONFIG_CHIPALIVE_LEVEL) { \
312 			printk("[dhd] " x, ## args); \
313 		} \
314 	} while (0)
315 #include <linux/kernel.h>	/* for vsn/printf's */
316 #include <linux/string.h>	/* for mem*, str* */
317 /* bcopy's: Linux kernel doesn't provide these (anymore) */
318 #define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
319 #define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
320 #define	bzero(b, len)		memset((b), '\0', (len))
321 
/* register access macros: R_REG/W_REG dispatch on sizeof(*(r)) to the
 * correct readX/writeX accessor when the bus is memory-mapped, and fall
 * back to OSL_READ_REG/OSL_WRITE_REG otherwise (see SELECT_BUS_*).
 */

#ifdef CONFIG_64BIT
/* readq is defined only for 64 bit platform */
#define R_REG(osh, r) (\
	SELECT_BUS_READ(osh, \
		({ \
			__typeof(*(r)) __osl_v = 0; \
			switch (sizeof(*(r))) { \
				case sizeof(uint8):	__osl_v = \
					readb((volatile uint8*)(r)); break; \
				case sizeof(uint16):	__osl_v = \
					readw((volatile uint16*)(r)); break; \
				case sizeof(uint32):	__osl_v = \
					readl((volatile uint32*)(r)); break; \
				case sizeof(uint64):	__osl_v = \
					readq((volatile uint64*)(r)); break; \
			} \
			__osl_v; \
		}), \
		OSL_READ_REG(osh, r)) \
)
#else /* !CONFIG_64BIT */
#define R_REG(osh, r) (\
	SELECT_BUS_READ(osh, \
		({ \
			__typeof(*(r)) __osl_v = 0; \
			switch (sizeof(*(r))) { \
				case sizeof(uint8):	__osl_v = \
					readb((volatile uint8*)(r)); break; \
				case sizeof(uint16):	__osl_v = \
					readw((volatile uint16*)(r)); break; \
				case sizeof(uint32):	__osl_v = \
					readl((volatile uint32*)(r)); break; \
			} \
			__osl_v; \
		}), \
		OSL_READ_REG(osh, r)) \
)
#endif /* CONFIG_64BIT */

#ifdef CONFIG_64BIT
/* writeq is defined only for 64 bit platform */
#define W_REG(osh, r, v) do { \
	SELECT_BUS_WRITE(osh, \
		switch (sizeof(*(r))) { \
			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
			case sizeof(uint64):	writeq((uint64)(v), (volatile uint64*)(r)); break; \
		}, \
		(OSL_WRITE_REG(osh, r, v))); \
	} while (0)

#else /* !CONFIG_64BIT */
#define W_REG(osh, r, v) do { \
	SELECT_BUS_WRITE(osh, \
		switch (sizeof(*(r))) { \
			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
		}, \
		(OSL_WRITE_REG(osh, r, v))); \
	} while (0)
#endif /* CONFIG_64BIT */
387 
/* read-modify-write register helpers (not atomic) */
#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))

/* bcopy, bcmp, and bzero functions
 * (identical redefinition of the macros above; harmless, kept for parity)
 */
#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
#define	bzero(b, len)		memset((b), '\0', (len))

/* uncached/cached virtual address (identity mapping on this platform) */
#define OSL_UNCACHED(va)	((void *)va)
#define OSL_CACHED(va)		((void *)va)

#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)

/* get processor cycle count */
#if defined(__i386__)
#define	OSL_GETCYCLES(x)	rdtscl((x))
#else
#define OSL_GETCYCLES(x)	((x) = 0)
#endif

/* dereference an address that may cause a bus exception */
#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })

/* map/unmap physical to virtual I/O */
#if !defined(CONFIG_MMC_MSM7X00A)
#define	REG_MAP(pa, size)	ioremap((unsigned long)(pa), (unsigned long)(size))
#else
#define REG_MAP(pa, size)       (void *)(0)
#endif /* !defined(CONFIG_MMC_MSM7X00A) */
#define	REG_UNMAP(va)		iounmap((va))

/* shared (dma-able) memory access macros */
#define	R_SM(r)			*(r)
#define	W_SM(r, v)		(*(r) = (v))
#define	BZERO_SM(r, len)	memset((r), '\0', (len))
425 
426 /* Because the non BINOSL implemenation of the PKT OSL routines are macros (for
427  * performance reasons),  we need the Linux headers.
428  */
429 #include <linuxver.h>		/* use current 2.4.x calling conventions */
430 
431 /* packet primitives */
432 #ifdef BCMDBG_CTRACE
433 #define	PKTGET(osh, len, send)		osl_pktget((osh), (len), __LINE__, __FILE__)
434 #define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb), __LINE__, __FILE__)
435 #else
436 #ifdef BCM_OBJECT_TRACE
437 #define	PKTGET(osh, len, send)		osl_pktget((osh), (len), __LINE__, __FUNCTION__)
438 #define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
439 #else
440 #define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
441 #define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb))
442 #endif /* BCM_OBJECT_TRACE */
443 #endif /* BCMDBG_CTRACE */
444 #define PKTLIST_DUMP(osh, buf)		BCM_REFERENCE(osh)
445 #define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
446 #if defined(BCM_OBJECT_TRACE)
447 #define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
448 #else
449 #define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
450 #endif /* BCM_OBJECT_TRACE */
451 #ifdef CONFIG_DHD_USE_STATIC_BUF
452 #define	PKTGET_STATIC(osh, len, send)		osl_pktget_static((osh), (len))
453 #define	PKTFREE_STATIC(osh, skb, send)		osl_pktfree_static((osh), (skb), (send))
454 #else
455 #define	PKTGET_STATIC	PKTGET
456 #define	PKTFREE_STATIC	PKTFREE
457 #endif /* CONFIG_DHD_USE_STATIC_BUF */
458 #define	PKTDATA(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
459 #define	PKTLEN(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
460 #define PKTHEADROOM(osh, skb)		(PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
461 #define PKTEXPHEADROOM(osh, skb, b)	\
462 	({ \
463 	 BCM_REFERENCE(osh); \
464 	 skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
465 	 })
466 #define PKTTAILROOM(osh, skb)		\
467 	({ \
468 	 BCM_REFERENCE(osh); \
469 	 skb_tailroom((struct sk_buff*)(skb)); \
470 	 })
471 #define PKTPADTAILROOM(osh, skb, padlen) \
472 	({ \
473 	 BCM_REFERENCE(osh); \
474 	 skb_pad((struct sk_buff*)(skb), (padlen)); \
475 	 })
476 #define	PKTNEXT(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
477 #define	PKTSETNEXT(osh, skb, x)		\
478 	({ \
479 	 BCM_REFERENCE(osh); \
480 	 (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
481 	 })
482 #define	PKTSETLEN(osh, skb, len)	\
483 	({ \
484 	 BCM_REFERENCE(osh); \
485 	 __skb_trim((struct sk_buff*)(skb), (len)); \
486 	 })
487 #define	PKTPUSH(osh, skb, bytes)	\
488 	({ \
489 	 BCM_REFERENCE(osh); \
490 	 skb_push((struct sk_buff*)(skb), (bytes)); \
491 	 })
492 #define	PKTPULL(osh, skb, bytes)	\
493 	({ \
494 	 BCM_REFERENCE(osh); \
495 	 skb_pull((struct sk_buff*)(skb), (bytes)); \
496 	 })
497 #define	PKTTAG(skb)			((void*)(((struct sk_buff*)(skb))->cb))
498 #define PKTSETPOOL(osh, skb, x, y)	BCM_REFERENCE(osh)
499 #define	PKTPOOL(osh, skb)		({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
500 #define PKTFREELIST(skb)        PKTLINK(skb)
501 #define PKTSETFREELIST(skb, x)  PKTSETLINK((skb), (x))
502 #define PKTPTR(skb)             (skb)
503 #define PKTID(skb)              ({BCM_REFERENCE(skb); 0;})
504 #define PKTSETID(skb, id)       ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
505 #define PKTSHRINK(osh, m)		({BCM_REFERENCE(osh); m;})
506 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
507 #define PKTORPHAN(skb, tsq)          osl_pkt_orphan_partial(skb, tsq)
508 extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);
509 #else
510 #define PKTORPHAN(skb, tsq)          ({BCM_REFERENCE(skb); 0;})
511 #endif /* LINUX VERSION >= 3.6 */
512 
513 
#ifdef BCMDBG_CTRACE
/* Packet call-trace bookkeeping (debug builds).  Wrapped in do { } while (0)
 * so each macro behaves as a single statement inside unbraced if/else.
 */
#define	DEL_CTRACE(zosh, zskb) do { \
	unsigned long zflags; \
	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
	list_del(&(zskb)->ctrace_list); \
	(zosh)->ctrace_num--; \
	(zskb)->ctrace_start = 0; \
	(zskb)->ctrace_count = 0; \
	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
} while (0)

/* Record (zfile, zline) in the skb's circular trace ring */
#define	UPDATE_CTRACE(zskb, zfile, zline) do { \
	struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
	if (_zskb->ctrace_count < CTRACE_NUM) { \
		_zskb->func[_zskb->ctrace_count] = zfile; \
		_zskb->line[_zskb->ctrace_count] = zline; \
		_zskb->ctrace_count++; \
	} \
	else { \
		_zskb->func[_zskb->ctrace_start] = zfile; \
		_zskb->line[_zskb->ctrace_start] = zline; \
		_zskb->ctrace_start++; \
		if (_zskb->ctrace_start >= CTRACE_NUM) \
			_zskb->ctrace_start = 0; \
	} \
} while (0)

/* Add the skb to the osh trace list and log the first call site */
#define	ADD_CTRACE(zosh, zskb, zfile, zline) do { \
	unsigned long zflags; \
	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
	list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
	(zosh)->ctrace_num++; \
	UPDATE_CTRACE(zskb, zfile, zline); \
	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
} while (0)

#define PKTCALLER(zskb)	UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
#endif /* BCMDBG_CTRACE */
552 
#ifdef CTFPOOL
#define	CTFPOOL_REFILL_THRESH	3
/* Free-object pool with usage counters, used for fast packet buffer reuse */
typedef struct ctfpool {
	void		*head;
	spinlock_t	lock;
	osl_t		*osh;
	uint		max_obj;
	uint		curr_obj;
	uint		obj_size;
	uint		refills;
	uint		fast_allocs;
	uint		fast_frees;
	uint		slow_allocs;
} ctfpool_t;

/* The FASTBUF tag lives in a different sk_buff field per kernel version:
 * pktc_flags (>= 2.6.36), mac_len (>= 2.6.22), or __unused (older).
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define	FASTBUF	(1 << 0)
#define	PKTSETFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
	 })
#define	PKTCLRFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
	 })
#define	PKTISFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
	 })
#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->pktc_flags)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define	FASTBUF	(1 << 16)
#define	PKTSETFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
	 })
#define	PKTCLRFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
	 })
#define	PKTISFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
	 })
#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->mac_len)
#else
#define	FASTBUF	(1 << 0)
#define	PKTSETFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
	 })
#define	PKTCLRFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
	 })
#define	PKTISFAST(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
	 })
#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->__unused)
#endif /* 2.6.22 */

/* back-pointer from an skb to its owning ctfpool */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->ctfpool)
#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
#else
#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->sk)
#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
#endif

extern void *osl_ctfpool_add(osl_t *osh);
extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
extern void osl_ctfpool_cleanup(osl_t *osh);
extern void osl_ctfpool_stats(osl_t *osh, void *b);
#else /* CTFPOOL */
#define	PKTSETFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTCLRFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTISFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
#endif /* CTFPOOL */

#define	PKTSETCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTCLRCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTISCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
646 
#ifdef HNDCTF

/* SKIPCT/CHAINED tags: stored in pktc_flags (>= 2.6.36), mac_len (>= 2.6.22)
 * or __unused (older kernels) — same scheme as FASTBUF above.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define	SKIPCT	(1 << 2)
#define	CHAINED	(1 << 3)
#define	PKTSETSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
	 })
#define	PKTCLRSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
	 })
#define	PKTSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
	 })
#define	PKTSETCHAINED(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
	 })
#define	PKTCLRCHAINED(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
	 })
#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->pktc_flags & CHAINED)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define	SKIPCT	(1 << 18)
#define	CHAINED	(1 << 19)
#define	PKTSETSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
	 })
#define	PKTCLRSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
	 })
#define	PKTSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len & SKIPCT); \
	 })
#define	PKTSETCHAINED(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len |= CHAINED); \
	 })
#define	PKTCLRCHAINED(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
	 })
#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->mac_len & CHAINED)
#else /* 2.6.22 */
#define	SKIPCT	(1 << 2)
#define	CHAINED	(1 << 3)
#define	PKTSETSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused |= SKIPCT); \
	 })
#define	PKTCLRSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
	 })
#define	PKTSKIPCT(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused & SKIPCT); \
	 })
#define	PKTSETCHAINED(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused |= CHAINED); \
	 })
#define	PKTCLRCHAINED(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
	 })
#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->__unused & CHAINED)
#endif /* 2.6.22 */
typedef struct ctf_mark {
	uint32	value;
}	ctf_mark_t;
/* argument parenthesized so any lvalue expression may be passed */
#define CTF_MARK(m)				((m).value)
#else /* HNDCTF */
#define	PKTSETSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTCLRSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define CTF_MARK(m)		({BCM_REFERENCE(m); 0;})
#endif /* HNDCTF */
747 
748 #if defined(BCM_GMAC3)
749 
750 /** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */
751 
752 /* Account for packets delivered to downstream forwarder by GMAC interface. */
753 extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt);
754 #define PKTTOFWDER(osh, skbs, skb_cnt)  \
755 	osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
756 
757 /* Account for packets received from downstream forwarder. */
758 #if defined(BCMDBG_CTRACE) /* pkt logging */
759 extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt,
760                              int line, char *file);
761 #define PKTFRMFWDER(osh, skbs, skb_cnt) \
762 	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \
763 	                 __LINE__, __FILE__)
764 #else  /* ! (BCMDBG_PKT || BCMDBG_CTRACE) */
765 extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt);
766 #define PKTFRMFWDER(osh, skbs, skb_cnt) \
767 	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
768 #endif
769 
770 
771 /** GMAC Forwarded packet tagging for reduced cache flush/invalidate.
772  * In FWDERBUF tagged packet, only FWDER_PKTMAPSZ amount of data would have
773  * been accessed in the GMAC forwarder. This may be used to limit the number of
774  * cachelines that need to be flushed or invalidated.
775  * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF.
776  * DHD may clear the FWDERBUF tag, if more than FWDER_PKTMAPSZ was accessed.
777  * Likewise, a debug print of a packet payload in say the ethernet driver needs
778  * to be accompanied with a clear of the FWDERBUF tag.
779  */
780 
781 /** Forwarded packets, have a GMAC_FWDER_HWRXOFF sized rx header (etc.h) */
782 #define FWDER_HWRXOFF       (18)
783 
784 /** Maximum amount of a pkt data that a downstream forwarder (GMAC) may have
785  * read into the L1 cache (not dirty). This may be used in reduced cache ops.
786  *
787  * Max 44: ET HWRXOFF[18] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4]
788  * Min 32: GMAC_FWDER_HWRXOFF[18] + EtherHdr[14]
789  */
790 #define FWDER_MINMAPSZ      (FWDER_HWRXOFF + 14)
791 #define FWDER_MAXMAPSZ      (FWDER_HWRXOFF + 4 + 14 + 4 + 4)
792 #define FWDER_PKTMAPSZ      (FWDER_MINMAPSZ)
793 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)

/* 2.6.36+: FWDERBUF tag kept in skb->pktc_flags (bit 4).
 * NOTE(review): pktc_flags is not a stock sk_buff member — presumably a
 * Broadcom packet-chaining kernel patch; confirm the target kernel is patched.
 */
#define FWDERBUF            (1 << 4)
/* Tag skb as GMAC-forwarded (only FWDER_PKTMAPSZ bytes were accessed) */
#define PKTSETFWDERBUF(osh, skb) \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \
	 })
/* Clear the tag; required once more than FWDER_PKTMAPSZ bytes are touched */
#define PKTCLRFWDERBUF(osh, skb) \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \
	 })
/* Non-zero if skb carries the forwarder tag */
#define PKTISFWDERBUF(osh, skb) \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \
	 })

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)

/* 2.6.22 .. 2.6.35: no pktc_flags field; overload a high bit (20) of
 * skb->mac_len instead. Bit 20 is above any real MAC header length.
 */
#define FWDERBUF	        (1 << 20)
#define PKTSETFWDERBUF(osh, skb) \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \
	 })
#define PKTCLRFWDERBUF(osh, skb)  \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \
	 })
#define PKTISFWDERBUF(osh, skb) \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \
	 })

#else /* 2.6.22 */

/* Pre-2.6.22: stash the tag in the skb __unused field (bit 4). */
#define FWDERBUF            (1 << 4)
#define PKTSETFWDERBUF(osh, skb)  \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \
	 })
#define PKTCLRFWDERBUF(osh, skb)  \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \
	 })
#define PKTISFWDERBUF(osh, skb) \
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->__unused & FWDERBUF); \
	 })

#endif /* 2.6.22 */
852 
853 #else  /* ! BCM_GMAC3 */
854 
/* GMAC3 forwarding disabled: tagging macros become no-ops; the "is tagged"
 * query always evaluates FALSE. BCM_REFERENCE silences unused-arg warnings.
 */
#define PKTSETFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
#define PKTCLRFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
#define PKTISFWDERBUF(osh, skb)   ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
858 
859 #endif /* ! BCM_GMAC3 */
860 
861 
#ifdef HNDCTF
/* For broadstream iqos */

/* TOBR ("to bridge") tagging for Cut-Through Forwarding (CTF). Only the
 * 2.6.36+ variant does real work; it reuses the same pktc_flags field as
 * FWDERBUF above (bit 5). Older kernels get no-op stubs.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define	TOBR		(1 << 5)
#define	PKTSETTOBR(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \
	 })
#define	PKTCLRTOBR(osh, skb)	\
	({ \
	 BCM_REFERENCE(osh); \
	 (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \
	 })
#define	PKTISTOBR(skb)	(((struct sk_buff*)(skb))->pktc_flags & TOBR)
/* Record the CTF IP-connection tx interface on the skb (patched field) */
#define	PKTSETCTFIPCTXIF(skb, ifp)	(((struct sk_buff*)(skb))->ctf_ipc_txif = ifp)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
#else /* 2.6.22 */
#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
#endif /* 2.6.22 */
#else /* HNDCTF */
/* CTF disabled: all TOBR macros are no-ops; PKTISTOBR is always FALSE */
#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
#endif /* HNDCTF */
895 
896 
#ifdef BCMFA
/* Flow Accelerator (FA) support: NAPT hash index / flags are carried in
 * Broadcom-patched sk_buff fields (napt_idx, napt_flags, rxdev).
 */
#ifdef BCMFA_HW_HASH
#define PKTSETFAHIDX(skb, idx)	(((struct sk_buff*)(skb))->napt_idx = idx)
#else
#define PKTSETFAHIDX(skb, idx)	({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
#endif /* BCMFA_HW_HASH */
#define PKTGETFAHIDX(skb)	(((struct sk_buff*)(skb))->napt_idx)
#define PKTSETFADEV(skb, imp)	(((struct sk_buff*)(skb))->dev = imp)
/* Remember the original rx device before skb->dev is retargeted */
#define PKTSETRXDEV(skb)	(((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)

/* napt_flags bits */
#define	AUX_TCP_FIN_RST	(1 << 0)	/* flow saw TCP FIN/RST */
#define	AUX_FREED	(1 << 1)	/* flow entry has been freed */
#define PKTSETFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
#define	PKTCLRFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
#define	PKTISFAAUX(skb)		(((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
#define PKTSETFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
#define	PKTCLRFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
#define	PKTISFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
#define	PKTISFABRIDGED(skb)	PKTISFAAUX(skb)
#else
/* FA disabled: predicates are constant FALSE, mutators are no-ops */
#define	PKTISFAAUX(skb)		({BCM_REFERENCE(skb); FALSE;})
#define	PKTISFABRIDGED(skb)	({BCM_REFERENCE(skb); FALSE;})
#define	PKTISFAFREED(skb)	({BCM_REFERENCE(skb); FALSE;})

#define	PKTCLRFAAUX(skb)	BCM_REFERENCE(skb)
#define PKTSETFAFREED(skb)	BCM_REFERENCE(skb)
#define	PKTCLRFAFREED(skb)	BCM_REFERENCE(skb)
#endif /* BCMFA */
925 
/* OSL packet allocation / conversion API. The BCM_OBJECT_TRACE and
 * BCMDBG_CTRACE variants add call-site (line/file or line/caller) parameters
 * for leak tracking; functional behavior is otherwise the same.
 */
#if defined(BCM_OBJECT_TRACE)
extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
#else
extern void osl_pktfree(osl_t *osh, void *skb, bool send);
#endif /* BCM_OBJECT_TRACE */
/* Allocate/free from the OSL static (preallocated) packet pool */
extern void *osl_pktget_static(osl_t *osh, uint len);
extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
extern void osl_pktclone(osl_t *osh, void **pkt);

#ifdef BCMDBG_CTRACE
#define PKT_CTRACE_DUMP(osh, b)	osl_ctrace_dump((osh), (b))
extern void *osl_pktget(osl_t *osh, uint len, int line, char *file);
extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
struct bcmstrbuf;
extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
#else
#ifdef BCM_OBJECT_TRACE
extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller);
extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
#else
extern void *osl_pktget(osl_t *osh, uint len);
extern void *osl_pktdup(osl_t *osh, void *skb);
#endif /* BCM_OBJECT_TRACE */
extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
#endif /* BCMDBG_CTRACE */
extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
/* Convert between native Linux sk_buff and the OSL packet abstraction */
#ifdef BCMDBG_CTRACE
#define PKTFRMNATIVE(osh, skb)  osl_pkt_frmnative(((osl_t *)osh), \
				(struct sk_buff*)(skb), __LINE__, __FILE__)
#define	PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
#else
#define PKTFRMNATIVE(osh, skb)	osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
#endif /* BCMDBG_CTRACE */
#define PKTTONATIVE(osh, pkt)		osl_pkt_tonative((osl_t *)(osh), (pkt))
962 
/* Direct sk_buff field accessors. PKTLINK overloads skb->prev to chain
 * packets within the OSL (the skb is not on a kernel queue at that point).
 */
#define	PKTLINK(skb)			(((struct sk_buff*)(skb))->prev)
#define	PKTSETLINK(skb, x)		(((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
#define	PKTPRIO(skb)			(((struct sk_buff*)(skb))->priority)
#define	PKTSETPRIO(skb, x)		(((struct sk_buff*)(skb))->priority = (x))
#define PKTSUMNEEDED(skb)		(((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff*)(skb))->ip_summed = \
						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
#define PKTSHARED(skb)                  (((struct sk_buff*)(skb))->cloned)

#ifdef CONFIG_NF_CONNTRACK_MARK
/* netfilter conntrack mark; field renamed nfmark -> mark in kernel 2.6 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define PKTMARK(p)                     (((struct sk_buff *)(p))->mark)
#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->mark = (m)
#else /* !2.6.0 */
#define PKTMARK(p)                     (((struct sk_buff *)(p))->nfmark)
#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->nfmark = (m)
#endif /* 2.6.0 */
#else /* CONFIG_NF_CONNTRACK_MARK */
/* conntrack mark unavailable: read as 0, set is a no-op */
#define PKTMARK(p)                     0
#define PKTSETMARK(p, m)
#endif /* CONFIG_NF_CONNTRACK_MARK */

/* Count of packets currently allocated through this OSL handle */
#define PKTALLOCED(osh)		osl_pktalloced(osh)
extern uint osl_pktalloced(osl_t *osh);

/* 32-bit pseudo-random number from the OSL */
#define OSL_RAND()		osl_rand()
extern uint32 osl_rand(void);
991 
/* Normal (non-secure) DMA mapping; the BCM_SECURE_DMA build replaces this
 * with the SECURE_DMA_* API and an empty DMA_MAP further below.
 */
#if !defined(BCM_SECURE_DMA)
#define	DMA_MAP(osh, va, size, direction, p, dmah) \
	osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
#endif /* !(defined(BCM_SECURE_DMA)) */
996 
#ifdef PKTC
/* Use 8 bytes of skb tstamp field to store below info */
struct chain_node {
	struct sk_buff	*link;			/* next skb in the chain */
	unsigned int	flags:3, pkts:9, bytes:20;	/* chain attrs, packed to 32 bits */
};

/* chain_node lives in skb->pktc_cb (Broadcom-patched control-buffer field) */
#define CHAIN_NODE(skb)		((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb))

#define	PKTCSETATTR(s, f, p, b)	({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \
	                         CHAIN_NODE(s)->bytes = (b);})
#define	PKTCCLRATTR(s)		({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \
	                         CHAIN_NODE(s)->bytes = 0;})
/* Pack attrs into one word; shifts match the 3/9/20 bitfield widths above */
#define	PKTCGETATTR(s)		(CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \
	                         CHAIN_NODE(s)->bytes)
#define	PKTCCNT(skb)		(CHAIN_NODE(skb)->pkts)
#define	PKTCLEN(skb)		(CHAIN_NODE(skb)->bytes)
#define	PKTCGETFLAGS(skb)	(CHAIN_NODE(skb)->flags)
#define	PKTCSETFLAGS(skb, f)	(CHAIN_NODE(skb)->flags = (f))
#define	PKTCCLRFLAGS(skb)	(CHAIN_NODE(skb)->flags = 0)
#define	PKTCFLAGS(skb)		(CHAIN_NODE(skb)->flags)
#define	PKTCSETCNT(skb, c)	(CHAIN_NODE(skb)->pkts = (c))
#define	PKTCINCRCNT(skb)	(CHAIN_NODE(skb)->pkts++)
#define	PKTCADDCNT(skb, c)	(CHAIN_NODE(skb)->pkts += (c))
#define	PKTCSETLEN(skb, l)	(CHAIN_NODE(skb)->bytes = (l))
#define	PKTCADDLEN(skb, l)	(CHAIN_NODE(skb)->bytes += (l))
#define	PKTCSETFLAG(skb, fb)	(CHAIN_NODE(skb)->flags |= (fb))
#define	PKTCCLRFLAG(skb, fb)	(CHAIN_NODE(skb)->flags &= ~(fb))
#define	PKTCLINK(skb)		(CHAIN_NODE(skb)->link)
#define	PKTSETCLINK(skb, x)	(CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
/* Walk a chain, unlinking each skb as we go. The comma expression in the
 * `if` saves the next link into nskb BEFORE clearing the current link, so
 * the iteration survives the unlink; it always evaluates to 1 (true).
 */
#define FOREACH_CHAINED_PKT(skb, nskb) \
	for (; (skb) != NULL; (skb) = (nskb)) \
		if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
		    PKTSETCLINK((skb), NULL), 1)
/* Free every packet in a chain, clearing chain state before each PKTFREE */
#define	PKTCFREE(osh, skb, send) \
do { \
	void *nskb; \
	ASSERT((skb) != NULL); \
	FOREACH_CHAINED_PKT((skb), nskb) { \
		PKTCLRCHAINED((osh), (skb)); \
		PKTCCLRFLAGS((skb)); \
		PKTFREE((osh), (skb), (send)); \
	} \
} while (0)
/* Append p to the chain with head h / tail t (both updated in place) */
#define PKTCENQTAIL(h, t, p) \
do { \
	if ((t) == NULL) { \
		(h) = (t) = (p); \
	} else { \
		PKTSETCLINK((t), (p)); \
		(t) = (p); \
	} \
} while (0)
#endif /* PKTC */
1051 
1052 #else /* ! BCMDRIVER */
1053 
1054 
/* Userland (non-BCMDRIVER) build: map the OSL primitives straight onto the
 * C library. ASSERT compiles away entirely.
 */
/* ASSERT */
	#define ASSERT(exp)	do {} while (0)

/* MALLOC and MFREE */
#define MALLOC(o, l) malloc(l)
#define MFREE(o, p, l) free(p)
#include <stdlib.h>

/* str* and mem* functions */
#include <string.h>

/* *printf functions */
#include <stdio.h>

/* bcopy, bcmp, and bzero */
extern void bcopy(const void *src, void *dst, size_t len);
extern int bcmp(const void *b1, const void *b2, size_t len);
extern void bzero(void *b, size_t len);
1073 #endif /* ! BCMDRIVER */
1074 
/* Current STB 7445D1 doesn't use ACP and it is non-coherent.
 * These dummy values are added only so the build passes.
 * When we revisit, these need to change.
 */
#if defined(STBLINUX)

/* STB (set-top box) ARMv7 build: ACP disabled, memory treated as
 * non-coherent — these are placeholder values (see note above).
 */
#if defined(__ARM_ARCH_7A__)
#define ACP_WAR_ENAB() 0
#define ACP_WIN_LIMIT 1
#define arch_is_coherent() 0
#endif /* __ARM_ARCH_7A__ */

#endif /* STBLINUX */
1088 
#ifdef BCM_SECURE_DMA

/* Secure-DMA build: all DMA mapping goes through per-CMA-region bookkeeping
 * (ptr_cma_info / sec_cma_info_t) instead of the plain osl_dma_map path.
 */
#define	SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \
	osl_sec_dma_map((osh), (va), (size), (direction), (p), (dmah), (pcma), (offset))
#define	SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) \
	osl_sec_dma_dd_map((osh), (va), (size), (direction), (p), (dmah))
#define	SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \
	osl_sec_dma_map_txmeta((osh), (va), (size), (direction), (p), (dmah), (pcma))
#define	SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \
	osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset))
#define	SECURE_DMA_UNMAP_ALL(osh, pcma) \
	osl_sec_dma_unmap_all((osh), (pcma))

/* Plain DMA_MAP is deliberately a no-op in the secure-DMA build */
#define DMA_MAP(osh, va, size, direction, p, dmah)

/* Per-consumer list of live secure-DMA allocations */
typedef struct sec_cma_info {
	struct sec_mem_elem *sec_alloc_list;		/* head of allocation list */
	struct sec_mem_elem *sec_alloc_list_tail;	/* tail, for O(1) append */
} sec_cma_info_t;

#if defined(__ARM_ARCH_7A__)
/* CMA buffer pool sizing for the secure-DMA region */
#define CMA_BUFSIZE_4K	4096
#define CMA_BUFSIZE_2K	2048
#define CMA_BUFSIZE_512	512

#define	CMA_BUFNUM		2048
#define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */
#define SEC_CMA_COHERENT_MAX 278
#define CMA_DMA_DESC_MEMBLOCK	(SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX)
#define CMA_DMA_DATA_MEMBLOCK	(CMA_BUFSIZE_4K*CMA_BUFNUM)
#define	CMA_MEMBLOCK		(CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK)
#define CONT_REGION	0x02		/* Region CMA */
#else
#define CONT_REGION	0x00		/* To access the MIPs mem, Not yet... */
#endif /* __ARM_ARCH_7A__ */

#define SEC_DMA_ALIGN	(1<<16)
/* One secure-DMA mapping: driver buffer <-> CMA bounce buffer pair */
typedef struct sec_mem_elem {
	size_t			size;
	int				direction;
	phys_addr_t		pa_cma;     /**< physical  address */
	void			*va;        /**< virtual address of driver pkt */
	dma_addr_t		dma_handle; /**< bus address assign by linux */
	void			*vac;       /**< virtual address of cma buffer */
	struct page *pa_cma_page;	/* phys to page address */
	struct	sec_mem_elem	*next;
} sec_mem_elem_t;

extern dma_addr_t osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset);
extern dma_addr_t osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah);
extern dma_addr_t osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size,
  int direction, void *p, hnddma_seg_map_t *dmah, void *ptr_cma_info);
extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset);
extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info);

#endif /* BCM_SECURE_DMA */
1148 
/* Thin wrappers over the kernel sk_buff_head queue API.
 * Note PKTLIST_ENQ uses skb_queue_head, so the list behaves LIFO.
 */
typedef struct sk_buff_head PKT_LIST;
#define PKTLIST_INIT(x)		skb_queue_head_init((x))
#define PKTLIST_ENQ(x, y)	skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y))
#define PKTLIST_DEQ(x)		skb_dequeue((struct sk_buff_head *)(x))
#define PKTLIST_UNLINK(x, y)	skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
#define PKTLIST_FINI(x)		skb_queue_purge((struct sk_buff_head *)(x))
1155 
#ifdef REPORT_FATAL_TIMEOUTS
/* OSL wrapper around a kernel timer_list; `set` tracks whether the timer
 * is currently armed.
 */
typedef struct osl_timer {
	struct timer_list *timer;
	bool   set;		/* true while the timer is pending */
} osl_timer_t;

typedef void (*linux_timer_fn)(ulong arg);

/* Create a timer bound to fn/arg; arm, re-arm, and delete it respectively */
extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg);
extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
#endif /* REPORT_FATAL_TIMEOUTS */
1169 
/* Kernel-version-independent timestamp carrying both usec and nsec parts,
 * used by the gettimeofday/boottime helpers below.
 */
typedef struct osl_timespec {
	__kernel_time_t	tv_sec;			/* seconds */
	__kernel_suseconds_t	tv_usec;	/* microseconds */
	long		tv_nsec;		/* nanoseconds */
} osl_timespec_t;
/* Fill ts with wall-clock time / monotonic boottime respectively */
extern void osl_do_gettimeofday(struct osl_timespec *ts);
extern void osl_get_monotonic_boottime(struct osl_timespec *ts);
/* Returns elapsed time between old_ts and cur_ts (see implementation for units) */
extern uint32 osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts);
1178 #endif	/* _linux_osl_h_ */
1179