/*
 * Linux-specific abstractions to gain some independence from linux kernel versions.
 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#ifndef _linuxver_h_
#define _linuxver_h_

/*
 * The pragmas below work around errors seen after updating GCC to 4.8.2.
 * GCC 4.6 added -Wunused-but-set-variable and -Wunused-but-set-parameter
 * to -Wall, and some kernel configurations trigger those warnings, so for
 * now the pragmas below disable them. The permanent solution is to use
 * -isystem, but that change causes a performance problem on RHEL5 servers.
 */
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
#endif

#include <typedefs.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#include <linux/config.h>
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
#include <linux/kconfig.h>
#endif
#include <linux/module.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
#ifdef __UNDEF_NO_VERSION__
#undef __NO_VERSION__
#else
#define __NO_VERSION__
#endif
#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_) \
		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif

/* linux/malloc.h is deprecated, use linux/slab.h instead. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/slab.h>
#endif

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/time.h>
#include <linux/rtc.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
#undef IP_TOS
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
#include <asm/io.h>

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
#include <linux/workqueue.h>
#else
#include <linux/tqueue.h>
#ifndef work_struct
#define work_struct tq_struct
#endif
#ifndef INIT_WORK
#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
#endif
#ifndef schedule_work
#define schedule_work(_work) schedule_task((_work))
#endif
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
#endif
#endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */

/*
 * TODO:
 * The daemonize() API is deprecated from kernel 3.8 onwards. More analysis
 *      is needed to determine whether this can cause any issue when the
 *      driver is loaded as a module from userspace.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define DAEMONIZE(a)	do { \
		allow_signal(SIGKILL);	\
		allow_signal(SIGTERM);	\
	} while (0)
#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
#define DAEMONIZE(a) daemonize(a); \
	allow_signal(SIGKILL); \
	allow_signal(SIGTERM);
#else /* Linux 2.4 (w/o preemption patch) */
#define RAISE_RX_SOFTIRQ() \
	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a) daemonize(); \
	do { if (a) \
		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
	} while (0);
#endif /* LINUX_VERSION_CODE */
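
/*
 * Illustrative usage sketch (not part of the original header): a kernel
 * thread entry point that detaches with DAEMONIZE() and then waits for a
 * signal. The thread name "example_thr" and the loop body are
 * hypothetical; DAEMONIZE() only guarantees that SIGKILL/SIGTERM are
 * allowed (and, pre-3.8, that daemonize() is called).
 */
#if 0
static int example_thread(void *data)
{
	DAEMONIZE("example_thr");	/* detach and allow SIGKILL/SIGTERM */
	while (!signal_pending(current))
		schedule_timeout_interruptible(HZ);	/* placeholder work loop */
	return 0;
}
#endif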

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
#else
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
#if (!(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
/* Exclude RHEL 5 */
typedef void (*work_func_t)(void *work);
#endif
#endif	/* >= 2.6.19 */
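
/*
 * Illustrative usage sketch (not in the original header): MY_INIT_WORK()
 * hides the pre-2.6.19 three-argument INIT_WORK(). On those older kernels
 * the handler actually receives the work pointer as a void *, per the
 * work_func_t typedef above. The names example_wq and example_work_cb are
 * hypothetical.
 */
#if 0
static struct work_struct example_wq;

static void example_work_cb(struct work_struct *work)
{
	/* deferred work goes here */
}

static void example_setup(void)
{
	MY_INIT_WORK(&example_wq, example_work_cb);
	schedule_work(&example_wq);
}
#endif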

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* Some distributions have their own 2.6.x compatibility layers */
#ifndef IRQ_NONE
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
#endif
#else
typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define IRQF_SHARED	SA_SHIRQ
#endif /* < 2.6.18 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef	CONFIG_NET_RADIO
#endif
#endif	/* < 2.6.17 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#include <linux/sched.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
#include <linux/sched/rt.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#include <uapi/linux/sched/types.h>
#endif /* LINUX_VERS >= 4.11.0 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */

#ifndef __exit
#define __exit
#endif
#ifndef __devexit
#define __devexit
#endif
#ifndef __devinit
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	#define __devinit	__init
#else
/* All devices are hotpluggable since linux 3.8.0 */
	#define __devinit
#endif
#endif /* !__devinit */
#ifndef __devinitdata
#define __devinitdata
#endif
#ifndef __devexit_p
#define __devexit_p(x)	x
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))

#define pci_get_drvdata(dev)		(dev)->sysdata
#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)

/*
 * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
 */

struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};

struct pci_driver {
	struct list_head node;
	char *name;
	const struct pci_device_id *id_table;	/* NULL if wants all devices */
	int (*probe)(struct pci_dev *dev,
	             const struct pci_device_id *id); /* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
						 * capable driver)
						 */
	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
	void (*resume)(struct pci_dev *dev);	/* Device woken up */
};

#define MODULE_DEVICE_TABLE(type, name)
#define PCI_ANY_ID (~0)

/* compatpci.c */
#define pci_module_init pci_register_driver
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);

#endif /* PCI registration */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
#ifdef MODULE
#define module_init(x) int init_module(void) { return x(); }
#define module_exit(x) void cleanup_module(void) { x(); }
#else
#define module_init(x)	__initcall(x);
#define module_exit(x)	__exitcall(x);
#endif
#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#else
#undef WL_USE_NETDEV_OPS
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
#define WL_CONFIG_RFKILL
#else
#undef WL_CONFIG_RFKILL
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
#define pci_enable_device(dev) do { } while (0)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
#define net_device device
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))

/*
 * DMA mapping
 *
 * See linux/Documentation/DMA-mapping.txt
 */

#ifndef PCI_DMA_TODEVICE
#define	PCI_DMA_TODEVICE	1
#define	PCI_DMA_FROMDEVICE	2
#endif

typedef u32 dma_addr_t;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
336*4882a593Smuzhiyun 
pci_alloc_consistent(struct pci_dev * hwdev,size_t size,dma_addr_t * dma_handle)337*4882a593Smuzhiyun static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
338*4882a593Smuzhiyun                                          dma_addr_t *dma_handle)
339*4882a593Smuzhiyun {
340*4882a593Smuzhiyun 	void *ret;
341*4882a593Smuzhiyun 	int gfp = GFP_ATOMIC | GFP_DMA;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	ret = (void *)__get_free_pages(gfp, get_order(size));
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	if (ret != NULL) {
346*4882a593Smuzhiyun 		bzero(ret, size);
347*4882a593Smuzhiyun 		*dma_handle = virt_to_bus(ret);
348*4882a593Smuzhiyun 	}
349*4882a593Smuzhiyun 	return ret;
350*4882a593Smuzhiyun }
pci_free_consistent(struct pci_dev * hwdev,size_t size,void * vaddr,dma_addr_t dma_handle)351*4882a593Smuzhiyun static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
352*4882a593Smuzhiyun                                        void *vaddr, dma_addr_t dma_handle)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun 	free_pages((unsigned long)vaddr, get_order(size));
355*4882a593Smuzhiyun }
356*4882a593Smuzhiyun #ifdef ILSIM
357*4882a593Smuzhiyun extern uint pci_map_single(void *dev, void *va, uint size, int direction);
358*4882a593Smuzhiyun extern void pci_unmap_single(void *dev, uint pa, uint size, int direction);
359*4882a593Smuzhiyun #else
360*4882a593Smuzhiyun #define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
361*4882a593Smuzhiyun #define pci_unmap_single(cookie, address, size, dir)
362*4882a593Smuzhiyun #endif
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun #endif /* DMA mapping */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)

typedef struct timer_list timer_list_compat_t;

#define init_timer_compat(timer_compat, cb, priv) \
	init_timer(timer_compat); \
	(timer_compat)->data = (ulong)priv; \
	(timer_compat)->function = cb
#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
#define timer_expires(timer_compat) (timer_compat)->expires

#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */

typedef struct timer_list_compat {
	struct timer_list timer;
	void *arg;
	void (*callback)(ulong arg);
} timer_list_compat_t;

extern void timer_cb_compat(struct timer_list *tl);

#define init_timer_compat(timer_compat, cb, priv) \
	(timer_compat)->arg = priv; \
	(timer_compat)->callback = cb; \
	timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
#define timer_expires(timer_compat) (timer_compat)->timer.expires

#define del_timer(t) del_timer(&((t)->timer))
#ifndef del_timer_sync
#define del_timer_sync(t) del_timer_sync(&((t)->timer))
#endif
#define timer_pending(t) timer_pending(&((t)->timer))
#define add_timer(t) add_timer(&((t)->timer))
#define mod_timer(t, j) mod_timer(&((t)->timer), j)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
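
/*
 * Illustrative usage sketch (not in the original header): a one-shot timer
 * using the compat wrappers above, so the same code builds before and
 * after the 4.15 timer_setup() API change. The callback name and the
 * one-second period are hypothetical.
 */
#if 0
static timer_list_compat_t example_timer;

static void example_timer_cb(ulong arg)
{
	void *priv = (void *)arg;	/* value passed via init_timer_compat() */
	/* timer work goes here */
}

static void example_arm(void *priv)
{
	init_timer_compat(&example_timer, example_timer_cb, priv);
	mod_timer(&example_timer, jiffies + HZ);	/* fire in ~1 s */
}
#endif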

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
#define rtc_time_to_tm(a, b) rtc_time64_to_tm(a, b)
#else
#define rtc_time_to_tm(a, b) rtc_time_to_tm(a, b)
#endif /* LINUX_VER >= 3.19.0 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
#define time_to_tm(a, b, c) time64_to_tm(a, b, c)
#else
#define time_to_tm(a, b, c) time_to_tm(a, b, c)
#endif /* LINUX_VER >= 4.20.0 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))

#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
#define netif_down(dev)			do { (dev)->start = 0; } while (0)

/* pcmcia-cs provides its own netdevice compatibility layer */
#ifndef _COMPAT_NETDEVICE_H

/*
 * SoftNet
 *
 * For pre-softnet kernels we need to tell the upper layer not to
 * re-enter start_xmit() while we are in there. However softnet
 * guarantees not to enter while we are in there so there is no need
 * to do the netif_stop_queue() dance unless the transmit queue really
 * gets stuck. This should also improve performance according to tests
 * done by Aman Singla.
 */

#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
#define netif_wake_queue(dev) \
		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)

static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}

#define netif_queue_stopped(dev)	(dev)->tbusy
#define netif_running(dev)		(dev)->start

#endif /* _COMPAT_NETDEVICE_H */

#define netif_device_attach(dev)	netif_start_queue(dev)
#define netif_device_detach(dev)	netif_stop_queue(dev)

/* 2.4.x renamed bottom halves to tasklets */
#define tasklet_struct				tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
                                void (*func)(unsigned long),
                                unsigned long data)
{
	tasklet->next = NULL;
	tasklet->sync = 0;
	tasklet->routine = (void (*)(void *))func;
	tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet)	{ do {} while (0); }

/* 2.4.x introduced del_timer_sync() */
#define del_timer_sync(timer) del_timer(timer)

#else

#define netif_down(dev)

#endif /* SoftNet */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))

/*
 * Emit code to initialise a tq_struct's routine and data pointers
 */
#define PREPARE_TQUEUE(_tq, _routine, _data)			\
	do {							\
		(_tq)->routine = _routine;			\
		(_tq)->data = _data;				\
	} while (0)

/*
 * Emit code to initialise all of a tq_struct
 */
#define INIT_TQUEUE(_tq, _routine, _data)			\
	do {							\
		INIT_LIST_HEAD(&(_tq)->list);			\
		(_tq)->sync = 0;				\
		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
	} while (0)

#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */

/* Power management related macro & routines */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
#else
#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
static inline int
pci_save_state(struct pci_dev *dev, u32 *buffer)
{
	int i;
	if (buffer) {
		/* 100% dword access ok here? */
		for (i = 0; i < 16; i++)
			pci_read_config_dword(dev, i * 4, &buffer[i]);
	}
	return 0;
}

static inline int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	/*
	 * otherwise, write the context information we know from bootup.
	 * This works around a problem where warm-booting from Windows
	 * combined with a D3(hot)->D0 transition causes PCI config
	 * header data to be forgotten.
	 */
	else {
		for (i = 0; i < 6; i ++)
			pci_write_config_dword(dev,
			                       PCI_BASE_ADDRESS_0 + (i * 4),
			                       pci_resource_start(dev, i));
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}
#endif /* PCI power management */

/* Old cp0 access macros deprecated in 2.4.19 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
#endif

/* Module refcount handled internally in 2.6.x */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do {} while (0)
#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#else
#define OLD_MOD_INC_USE_COUNT		do {} while (0)
#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
#endif
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do {} while (0)
#endif
#ifndef MOD_INC_USE_COUNT
#define MOD_INC_USE_COUNT			do {} while (0)
#endif
#ifndef MOD_DEC_USE_COUNT
#define MOD_DEC_USE_COUNT			do {} while (0)
#endif
#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev)		kfree(dev)
#endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* struct packet_type redefined in 2.6.x */
#define af_packet_priv			data
#endif

/* suspend args */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#define DRV_SUSPEND_STATE_TYPE uint32
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define CHECKSUM_HW	CHECKSUM_PARTIAL
#endif

typedef struct {
	void	*parent;  /* some external entity that the thread is supposed to work for */
	char	*proc_name;
	struct	task_struct *p_task;
	long	thr_pid;
	int		prio; /* priority */
	struct	semaphore sema;
	int	terminated;
	struct	completion completed;
	int	flush_ind;
	struct	completion flushed;
	spinlock_t	spinlock;
	int		up_cnt;
} tsk_ctl_t;

/* ANDREY: new macros to start/stop threads (old kthread API style) */
/* requires a tsk_ctl_t tsk argument; the caller's priv data is passed in the owner ptr */
/* note: this macro assumes there may be only one context waiting on the thread's completion */
#ifdef KERNEL_TIMESTAMP
extern char *dhd_log_dump_get_timestamp(void);
#ifdef SYSTEM_TIMESTAMP
extern char* dhd_dbg_get_system_timestamp(void);
#define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp(), dhd_dbg_get_system_timestamp()
#define PERCENT_S "[%s][%s]"
#else
#define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp()
#define PERCENT_S "[%s]"
#endif
#else
#define PRINTF_SYSTEM_TIME ""
#define PERCENT_S "%s"
#endif
#ifndef DHD_LOG_PREFIX
#define DHD_LOG_PREFIX "[dhd]"
#endif
#define DHD_LOG_PREFIXS DHD_LOG_PREFIX" "
#ifdef DHD_DEBUG
#define	printf_thr(fmt, args...)	printk(PERCENT_S DHD_LOG_PREFIXS fmt, PRINTF_SYSTEM_TIME, ## args)
#define DBG_THR(args)		do {printf_thr args;} while (0)
#else
#define DBG_THR(x)
#endif

extern unsigned long osl_spin_lock(void *lock);
extern void osl_spin_unlock(void *lock, unsigned long flags);

#define TSK_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
#define TSK_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))

static inline bool binary_sema_down(tsk_ctl_t *tsk)
{
	if (down_interruptible(&tsk->sema) == 0) {
		unsigned long flags = 0;
		TSK_LOCK(&tsk->spinlock, flags);
		if (tsk->up_cnt == 1)
			tsk->up_cnt--;
		else {
			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
		}
		TSK_UNLOCK(&tsk->spinlock, flags);
		return false;
	} else
		return true;
}

static inline bool binary_sema_up(tsk_ctl_t *tsk)
{
	bool sem_up = false;
	unsigned long flags = 0;

	TSK_LOCK(&tsk->spinlock, flags);
	if (tsk->up_cnt == 0) {
		tsk->up_cnt++;
		sem_up = true;
	} else if (tsk->up_cnt == 1) {
		/* dhd_sched_dpc: dpc is already up! */
	} else
		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));

	TSK_UNLOCK(&tsk->spinlock, flags);

	if (sem_up)
		up(&tsk->sema);

	return sem_up;
}
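
/*
 * Illustrative usage sketch (not in the original header): the pair above
 * implements a binary semaphore, so repeated wake-ups collapse into one.
 * A consumer thread typically blocks in binary_sema_down() and producers
 * call binary_sema_up() to kick it; the names example_kick() and
 * example_consumer_loop() are hypothetical.
 */
#if 0
static void example_kick(tsk_ctl_t *tsk)
{
	binary_sema_up(tsk);	/* no-op if a wake-up is already pending */
}

static void example_consumer_loop(tsk_ctl_t *tsk)
{
	/* false means a normal wake-up; true means interrupted by a signal */
	while (!binary_sema_down(tsk)) {
		if (tsk->terminated)
			break;
		/* drain pending work here */
	}
}
#endif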

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
#define SMP_RD_BARRIER_DEPENDS(x)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
#endif

#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
{ \
	sema_init(&((tsk_ctl)->sema), 0); \
	init_completion(&((tsk_ctl)->completed)); \
	init_completion(&((tsk_ctl)->flushed)); \
	(tsk_ctl)->parent = owner; \
	(tsk_ctl)->proc_name = name;  \
	(tsk_ctl)->terminated = FALSE; \
	(tsk_ctl)->flush_ind = FALSE; \
	(tsk_ctl)->up_cnt = 0; \
	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
	if (IS_ERR((tsk_ctl)->p_task)) { \
		(tsk_ctl)->thr_pid = -1; \
		DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
			(tsk_ctl)->proc_name)); \
	} else { \
		(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
		spin_lock_init(&((tsk_ctl)->spinlock)); \
		DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	}; \
}

#define PROC_WAIT_TIMEOUT_MSEC	5000 /* 5 seconds */

#define PROC_STOP(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	up(&((tsk_ctl)->sema));	\
	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->parent = NULL; \
	(tsk_ctl)->proc_name = NULL;  \
	(tsk_ctl)->thr_pid = -1; \
	(tsk_ctl)->up_cnt = 0; \
}

#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	binary_sema_up(tsk_ctl);	\
	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->parent = NULL; \
	(tsk_ctl)->proc_name = NULL;  \
	(tsk_ctl)->thr_pid = -1; \
}

/*
 * Flush is non-reentrant, so callers must make sure
 * there is no race condition.
 * For a safer exit, wait_for_completion_timeout was added
 * with a PROC_WAIT_TIMEOUT_MSEC timeout.
 */
#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->flush_ind = TRUE; \
	smp_wmb(); \
	binary_sema_up(tsk_ctl);	\
	DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
}
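
/*
 * Illustrative lifecycle sketch (not in the original header): PROC_START()
 * spawns a kthread around a tsk_ctl_t, the thread loops on the semaphore,
 * and PROC_STOP() sets terminated and waits on the completion. The names
 * example_thr_func and example_attach/example_detach are hypothetical;
 * note the thread must call complete_and_exit() on tsk->completed or
 * PROC_STOP() will time out.
 */
#if 0
static int example_thr_func(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;

	while (down_interruptible(&tsk->sema) == 0) {
		SMP_RD_BARRIER_DEPENDS();
		if (tsk->terminated)
			break;
		/* per-wakeup work goes here */
	}
	complete_and_exit(&tsk->completed, 0);
}

static void example_attach(void *owner, tsk_ctl_t *tsk)
{
	PROC_START(example_thr_func, owner, tsk, 0, "example_thr");
}

static void example_detach(tsk_ctl_t *tsk)
{
	if (tsk->thr_pid >= 0)
		PROC_STOP(tsk);
}
#endif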

/*  ----------------------- */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
#define KILL_PROC(nr, sig) \
{ \
struct task_struct *tsk; \
struct pid *pid;    \
pid = find_get_pid((pid_t)nr);    \
tsk = pid_task(pid, PIDTYPE_PID);    \
if (tsk) send_sig(sig, tsk, 1); \
}
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 30))
#define KILL_PROC(pid, sig) \
{ \
	struct task_struct *tsk; \
	tsk = find_task_by_vpid(pid); \
	if (tsk) send_sig(sig, tsk, 1); \
}
#else
#define KILL_PROC(pid, sig) \
{ \
	kill_proc(pid, sig, 1); \
}
#endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
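
/*
 * Illustrative usage sketch (not in the original header): KILL_PROC()
 * expands to a statement block, so it is used like a void statement. The
 * helper and pid value here are hypothetical; on >= 2.6.31 the expansion
 * resolves the pid via find_get_pid()/pid_task() before sending the signal.
 */
#if 0
static void example_signal_helper(long helper_pid)
{
	KILL_PROC(helper_pid, SIGTERM);	/* ask the helper process to exit */
}
#endif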

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#include <linux/time.h>
#include <linux/wait.h>
#else
#include <linux/sched.h>

#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
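
/*
 * Illustrative usage sketch (not in the original header): the backported
 * wait_event_interruptible_timeout() above keeps the usual semantics: it
 * returns the remaining jiffies, 0 on timeout, or -ERESTARTSYS on a
 * signal. The wait-queue and flag names are hypothetical.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(example_wq_head);
static volatile int example_done;

static int example_wait(void)
{
	long left = wait_event_interruptible_timeout(example_wq_head,
	                                             example_done, HZ);
	if (left == 0)
		return -1;		/* timed out after ~1 s */
	if (left < 0)
		return (int)left;	/* interrupted by a signal */
	return 0;			/* condition became true */
}
#endif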

/*
 * For < 2.6.24, wl creates its own netdev but doesn't
 * align the priv area like the genuine alloc_netdev().
 * Since netdev_priv() always gives us the aligned address, it will
 * not match our unaligned address for < 2.6.24.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DEV_PRIV(dev)	(dev->priv)
#else
#define DEV_PRIV(dev)	netdev_priv(dev)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define WL_ISR(i, d, p)         wl_isr((i), (d))
#else
#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
#endif  /* >= 2.6.20 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define netdev_priv(dev) dev->priv
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
#else
#define CAN_SLEEP()	(FALSE)
#endif

#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
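
/*
 * Illustrative usage sketch (not in the original header): KMALLOC_FLAG
 * picks GFP_KERNEL when sleeping is safe and GFP_ATOMIC otherwise, so one
 * allocation helper can be called from both process and atomic context.
 * The helper name is hypothetical.
 */
#if 0
static void *example_alloc(size_t len)
{
	return kmalloc(len, KMALLOC_FLAG);
}
#endif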

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 170))
#define RANDOM32	get_random_u32
#define RANDOM_BYTES    get_random_bytes
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define RANDOM32	prandom_u32
#define RANDOM_BYTES    prandom_bytes
#else
#define RANDOM32	random32
#define RANDOM_BYTES    get_random_bytes
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 170) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define SRANDOM32(entropy)	prandom_seed(entropy)
#else
#define SRANDOM32(entropy)	srandom32(entropy)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
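
/*
 * Illustrative usage sketch (not in the original header): the RANDOM32 /
 * RANDOM_BYTES / SRANDOM32 aliases paper over the kernel's renamed PRNG
 * helpers across versions. The buffer and seed handling are hypothetical.
 */
#if 0
static void example_randomize(u8 *buf, int len)
{
	u32 r = RANDOM32();		/* one pseudo-random 32-bit value */
	RANDOM_BYTES(buf, len);		/* fill a buffer with random bytes */
	SRANDOM32(r);			/* mix extra entropy into the PRNG */
}
#endif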

/*
 * Override the latest kfifo functions with
 * older versions so the code works on older kernels
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
#define kfifo_esize(a)				1
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
#define kfifo_esize(a)				1
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
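
/*
 * Illustrative usage sketch (not in the original header): with the
 * mappings above, callers can use the modern spinlocked kfifo API even on
 * kernels that only have the older entry points. The fifo, buffer, and
 * lock names are hypothetical.
 */
#if 0
static unsigned int example_fifo_push(struct kfifo *fifo, const u8 *buf,
                                      unsigned int len, spinlock_t *lock)
{
	/* returns the number of elements (of kfifo_esize()) copied in */
	return kfifo_in_spinlocked(fifo, buf, len, lock);
}
#endif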

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
#include <linux/fs.h>
static inline struct inode *file_inode(const struct file *f)
{
	return f->f_dentry->d_inode;
}
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
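
/*
 * Illustrative usage sketch (not in the original header): with the
 * fallback above, file_inode() can be used uniformly to query a file's
 * size on kernels before and after 3.9. The helper name is hypothetical.
 */
#if 0
static loff_t example_file_size(struct file *fp)
{
	return i_size_read(file_inode(fp));
}
#endif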

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
#define netdev_tx_t int
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
#define complete_and_exit(a, b) kthread_complete_and_exit(a, b)
#else
#define	dev_addr_set(net, addr) memcpy(net->dev_addr, addr, ETHER_ADDR_LEN)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
#define netif_rx_ni(skb) netif_rx(skb)
#define pci_free_consistent(a, b, c, d) dma_free_coherent(&((struct pci_dev *)a)->dev, b, c, d)
#define pci_map_single(a, b, c, d) dma_map_single(&((struct pci_dev *)a)->dev, b, c, d)
#define pci_unmap_single(a, b, c, d) dma_unmap_single(&((struct pci_dev *)a)->dev, b, c, d)
#define pci_dma_mapping_error(a, b) dma_mapping_error(&((struct pci_dev *)a)->dev, b)
#ifndef PCI_DMA_TODEVICE
#define	PCI_DMA_TODEVICE	1
#define	PCI_DMA_FROMDEVICE	2
#endif
#endif

#ifdef ANDROID_BKPORT
#if (ANDROID_VERSION >= 13) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 41))
#define ANDROID13_KERNEL515_BKPORT
#define CFG80211_BKPORT_MLO
#endif /* ANDROID_VERSION >= 13 && KERNEL >= 5.15.41 */
#endif /* ANDROID_BKPORT */

#endif /* _linuxver_h_ */