xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bcmdhd/include/linuxver.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Linux-specific abstractions to gain some independence from linux kernel versions.
3  * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
4  *
5  * Copyright (C) 2020, Broadcom.
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *
22  * <<Broadcom-WL-IPTag/Dual:>>
23  */
24 
25 #ifndef _linuxver_h_
26 #define _linuxver_h_
27 
28 /*
29  * The below pragmas are added as workaround for errors caused by update
30  * of gcc version to 4.8.2. GCC 4.6 adds -Wunused-but-set-variable and
31  * -Wunused-but-set-parameter to -Wall, for some configurations those
32  * warnings are produced in linux kernel. So for now the below pragmas
33  * disable the offending warnings. Permanent solution is to use -isystem
34  * but there is a performance problem with this change on RHEL5 servers
35  *
36  */
37 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
38 #pragma GCC diagnostic push
39 #pragma GCC diagnostic ignored "-Wunused-but-set-variable"
40 #pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
41 #endif
42 
43 #include <typedefs.h>
44 #include <linux/version.h>
45 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
46 #include <linux/config.h>
47 #else
48 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
49 #include <generated/autoconf.h>
50 #else
51 #include <linux/autoconf.h>
52 #endif
53 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
54 
55 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
56 #include <linux/kconfig.h>
57 #endif
58 #include <linux/module.h>
59 
60 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
61 /* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
62 #ifdef __UNDEF_NO_VERSION__
63 #undef __NO_VERSION__
64 #else
65 #define __NO_VERSION__
66 #endif
67 #endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
68 
69 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
70 #define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
71 #define module_param_string(_name_, _string_, _size_, _perm_) \
72 		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
73 #endif
74 
75 /* linux/malloc.h is deprecated, use linux/slab.h instead. */
76 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
77 #include <linux/malloc.h>
78 #else
79 #include <linux/slab.h>
80 #endif
81 
82 #include <linux/types.h>
83 #include <linux/init.h>
84 #include <linux/mm.h>
85 #include <linux/string.h>
86 #include <linux/pci.h>
87 #include <linux/interrupt.h>
88 #include <linux/kthread.h>
89 #include <linux/netdevice.h>
90 #include <linux/time.h>
91 #include <linux/rtc.h>
92 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
93 #include <linux/semaphore.h>
94 #else
95 #include <asm/semaphore.h>
96 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
97 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
98 #undef IP_TOS
99 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
100 #include <asm/io.h>
101 
102 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
103 #include <linux/workqueue.h>
104 #else
105 #include <linux/tqueue.h>
106 #ifndef work_struct
107 #define work_struct tq_struct
108 #endif
109 #ifndef INIT_WORK
110 #define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
111 #endif
112 #ifndef schedule_work
113 #define schedule_work(_work) schedule_task((_work))
114 #endif
115 #ifndef flush_scheduled_work
116 #define flush_scheduled_work() flush_scheduled_tasks()
117 #endif
118 #endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
119 
120 /*
121  * TODO:
122  * daemonize() API is deprecated from kernel-3.8 onwards. More debugging
123  *      has to be done whether this can cause any issue in case, if driver is
124  *      loaded as a module from userspace.
125  */
126 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
127 #define DAEMONIZE(a)	do { \
128 		allow_signal(SIGKILL);	\
129 		allow_signal(SIGTERM);	\
130 	} while (0)
131 #elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
132 	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
133 #define DAEMONIZE(a) daemonize(a); \
134 	allow_signal(SIGKILL); \
135 	allow_signal(SIGTERM);
136 #else /* Linux 2.4 (w/o preemption patch) */
137 #define RAISE_RX_SOFTIRQ() \
138 	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
139 #define DAEMONIZE(a) daemonize(); \
140 	do { if (a) \
141 		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
142 	} while (0);
143 #endif /* LINUX_VERSION_CODE  */
144 
145 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
146 #define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
147 #else
148 #define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
149 #if (!(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
150 /* Exclude RHEL 5 */
151 typedef void (*work_func_t)(void *work);
152 #endif
#endif	/* >= 2.6.19 */
154 
155 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
156 /* Some distributions have their own 2.6.x compatibility layers */
157 #ifndef IRQ_NONE
158 typedef void irqreturn_t;
159 #define IRQ_NONE
160 #define IRQ_HANDLED
161 #define IRQ_RETVAL(x)
162 #endif
163 #else
164 typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
165 #endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
166 
167 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
168 #define IRQF_SHARED	SA_SHIRQ
169 #endif /* < 2.6.18 */
170 
171 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
172 #ifdef	CONFIG_NET_RADIO
173 #endif
174 #endif	/* < 2.6.17 */
175 
176 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
177 #define MOD_INC_USE_COUNT
178 #define MOD_DEC_USE_COUNT
179 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
180 
181 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
182 #include <linux/sched.h>
183 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
184 
185 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
186 #include <linux/sched/rt.h>
187 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
188 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
189 #include <uapi/linux/sched/types.h>
190 #endif /* LINUX_VERS >= 4.11.0 */
191 
192 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
193 #include <net/lib80211.h>
194 #endif
195 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
196 #include <linux/ieee80211.h>
197 #else
198 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
199 #include <net/ieee80211.h>
200 #endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
202 
203 #ifndef __exit
204 #define __exit
205 #endif
206 #ifndef __devexit
207 #define __devexit
208 #endif
209 #ifndef __devinit
210 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
211 	#define __devinit	__init
212 #else
213 /* All devices are hotpluggable since linux 3.8.0 */
214 	#define __devinit
215 #endif
216 #endif /* !__devinit */
217 #ifndef __devinitdata
218 #define __devinitdata
219 #endif
220 #ifndef __devexit_p
221 #define __devexit_p(x)	x
222 #endif
223 
224 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
225 
226 #define pci_get_drvdata(dev)		(dev)->sysdata
227 #define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
228 
229 /*
230  * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
231  */
232 
/* Minimal stand-in for the 2.4-style PCI device ID table entry, for
 * pre-2.4 kernels that lack new-style PCI registration.
 */
struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};

/* Minimal stand-in for the 2.4-style pci_driver registration record,
 * consumed by the pci_register_driver()/pci_unregister_driver()
 * compatibility routines declared below (compatpci.c).
 */
struct pci_driver {
	struct list_head node;
	char *name;
	const struct pci_device_id *id_table;	/* NULL if wants all devices */
	int (*probe)(struct pci_dev *dev,
	             const struct pci_device_id *id); /* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
						 * capable driver)
						 */
	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
	void (*resume)(struct pci_dev *dev);	/* Device woken up */
};
252 
253 #define MODULE_DEVICE_TABLE(type, name)
254 #define PCI_ANY_ID (~0)
255 
256 /* compatpci.c */
257 #define pci_module_init pci_register_driver
258 extern int pci_register_driver(struct pci_driver *drv);
259 extern void pci_unregister_driver(struct pci_driver *drv);
260 
261 #endif /* PCI registration */
262 
263 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
264 #define pci_module_init pci_register_driver
265 #endif
266 
267 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
268 #ifdef MODULE
269 #define module_init(x) int init_module(void) { return x(); }
270 #define module_exit(x) void cleanup_module(void) { x(); }
271 #else
272 #define module_init(x)	__initcall(x);
273 #define module_exit(x)	__exitcall(x);
274 #endif
275 #endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
276 
277 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
278 #define WL_USE_NETDEV_OPS
279 #else
280 #undef WL_USE_NETDEV_OPS
281 #endif
282 
283 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
284 #define WL_CONFIG_RFKILL
285 #else
286 #undef WL_CONFIG_RFKILL
287 #endif
288 
289 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
290 #define list_for_each(pos, head) \
291 	for (pos = (head)->next; pos != (head); pos = pos->next)
292 #endif
293 
294 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
295 #define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
296 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
297 #define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
298 #endif
299 
300 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
301 #define pci_enable_device(dev) do { } while (0)
302 #endif
303 
304 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
305 #define net_device device
306 #endif
307 
308 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
309 
310 /*
311  * DMA mapping
312  *
313  * See linux/Documentation/DMA-mapping.txt
314  */
315 
316 #ifndef PCI_DMA_TODEVICE
317 #define	PCI_DMA_TODEVICE	1
318 #define	PCI_DMA_FROMDEVICE	2
319 #endif
320 
321 typedef u32 dma_addr_t;
322 
323 /* Pure 2^n version of get_order */
get_order(unsigned long size)324 static inline int get_order(unsigned long size)
325 {
326 	int order;
327 
328 	size = (size-1) >> (PAGE_SHIFT-1);
329 	order = -1;
330 	do {
331 		size >>= 1;
332 		order++;
333 	} while (size);
334 	return order;
335 }
336 
/*
 * Pre-2.3.42 stand-in for pci_alloc_consistent(): grab whole pages with
 * GFP_ATOMIC | GFP_DMA, zero them, and report the bus address through
 * *dma_handle. Returns NULL (leaving *dma_handle untouched) when the
 * page allocator fails. hwdev is unused in this compat implementation.
 */
static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
	int flags = GFP_ATOMIC | GFP_DMA;
	void *va = (void *)__get_free_pages(flags, get_order(size));

	if (va == NULL)
		return NULL;

	bzero(va, size);
	*dma_handle = virt_to_bus(va);
	return va;
}
/*
 * Counterpart of pci_alloc_consistent() above: return the pages to the
 * allocator. size must match the value used at allocation (it is used to
 * recompute the page order). hwdev and dma_handle are unused here.
 */
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
356 #ifdef ILSIM
357 extern uint pci_map_single(void *dev, void *va, uint size, int direction);
358 extern void pci_unmap_single(void *dev, uint pa, uint size, int direction);
359 #else
360 #define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
361 #define pci_unmap_single(cookie, address, size, dir)
362 #endif
363 
364 #endif /* DMA mapping */
365 
366 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
367 
368 typedef struct timer_list timer_list_compat_t;
369 
370 #define init_timer_compat(timer_compat, cb, priv) \
371 	init_timer(timer_compat); \
372 	(timer_compat)->data = (ulong)priv; \
373 	(timer_compat)->function = cb
374 #define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
375 #define timer_expires(timer_compat) (timer_compat)->expires
376 
377 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
378 
/*
 * Compatibility wrapper for the 4.15+ timer API: keeps the legacy
 * (function, data) callback style on top of timer_setup().
 */
typedef struct timer_list_compat {
	struct timer_list timer;	/* the real kernel timer */
	void *arg;			/* private data handed to callback */
	void (*callback)(ulong arg);	/* legacy-style callback */
} timer_list_compat_t;

/* Trampoline registered via timer_setup() in init_timer_compat();
 * presumably forwards to ->callback(->arg) — defined elsewhere.
 */
extern void timer_cb_compat(struct timer_list *tl);
386 
387 #define init_timer_compat(timer_compat, cb, priv) \
388 	(timer_compat)->arg = priv; \
389 	(timer_compat)->callback = cb; \
390 	timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
391 #define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
392 #define timer_expires(timer_compat) (timer_compat)->timer.expires
393 
394 #define del_timer(t) del_timer(&((t)->timer))
395 #ifndef del_timer_sync
396 #define del_timer_sync(t) del_timer_sync(&((t)->timer))
397 #endif
398 #define timer_pending(t) timer_pending(&((t)->timer))
399 #define add_timer(t) add_timer(&((t)->timer))
400 #define mod_timer(t, j) mod_timer(&((t)->timer), j)
401 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
402 
403 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
404 #define rtc_time_to_tm(a, b) rtc_time64_to_tm(a, b)
405 #else
406 #define rtc_time_to_tm(a, b) rtc_time_to_tm(a, b)
407 #endif /* LINUX_VER >= 3.19.0 */
408 
409 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
410 #define time_to_tm(a, b, c) time64_to_tm(a, b, c)
411 #else
412 #define time_to_tm(a, b, c) time_to_tm(a, b, c)
413 #endif /* LINUX_VER >= 4.20.0 */
414 
415 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
416 
417 #define dev_kfree_skb_any(a)		dev_kfree_skb(a)
418 #define netif_down(dev)			do { (dev)->start = 0; } while (0)
419 
420 /* pcmcia-cs provides its own netdevice compatibility layer */
421 #ifndef _COMPAT_NETDEVICE_H
422 
423 /*
424  * SoftNet
425  *
426  * For pre-softnet kernels we need to tell the upper layer not to
427  * re-enter start_xmit() while we are in there. However softnet
428  * guarantees not to enter while we are in there so there is no need
429  * to do the netif_stop_queue() dance unless the transmit queue really
430  * gets stuck. This should also improve performance according to tests
431  * done by Aman Singla.
432  */
433 
434 #define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
435 #define netif_wake_queue(dev) \
436 		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
437 #define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
438 
/*
 * Pre-softnet equivalent of netif_start_queue(): mark the device as not
 * busy transmitting, not in interrupt handling, and started.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}
445 
446 #define netif_queue_stopped(dev)	(dev)->tbusy
447 #define netif_running(dev)		(dev)->start
448 
449 #endif /* _COMPAT_NETDEVICE_H */
450 
451 #define netif_device_attach(dev)	netif_start_queue(dev)
452 #define netif_device_detach(dev)	netif_stop_queue(dev)
453 
454 /* 2.4.x renamed bottom halves to tasklets */
455 #define tasklet_struct				tq_struct
/*
 * 2.4-era tasklet scheduling built on task queues: queue the work on the
 * immediate queue, then mark the bottom half so it runs soon. The
 * queue_task() must precede mark_bh().
 */
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}
461 
tasklet_init(struct tasklet_struct * tasklet,void (* func)(unsigned long),unsigned long data)462 static inline void tasklet_init(struct tasklet_struct *tasklet,
463                                 void (*func)(unsigned long),
464                                 unsigned long data)
465 {
466 	tasklet->next = NULL;
467 	tasklet->sync = 0;
468 	tasklet->routine = (void (*)(void *))func;
469 	tasklet->data = (void *)data;
470 }
471 #define tasklet_kill(tasklet)	{ do {} while (0); }
472 
473 /* 2.4.x introduced del_timer_sync() */
474 #define del_timer_sync(timer) del_timer(timer)
475 
476 #else
477 
478 #define netif_down(dev)
479 
480 #endif /* SoftNet */
481 
482 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
483 
484 /*
485  * Emit code to initialise a tq_struct's routine and data pointers
486  */
487 #define PREPARE_TQUEUE(_tq, _routine, _data)			\
488 	do {							\
489 		(_tq)->routine = _routine;			\
490 		(_tq)->data = _data;				\
491 	} while (0)
492 
493 /*
494  * Emit code to initialise all of a tq_struct
495  */
496 #define INIT_TQUEUE(_tq, _routine, _data)			\
497 	do {							\
498 		INIT_LIST_HEAD(&(_tq)->list);			\
499 		(_tq)->sync = 0;				\
500 		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
501 	} while (0)
502 
503 #endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
504 
505 /* Power management related macro & routines */
506 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
507 #define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
508 #define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
509 #else
510 #define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
511 #define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
512 #endif
513 
514 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
515 static inline int
pci_save_state(struct pci_dev * dev,u32 * buffer)516 pci_save_state(struct pci_dev *dev, u32 *buffer)
517 {
518 	int i;
519 	if (buffer) {
520 		/* 100% dword access ok here? */
521 		for (i = 0; i < 16; i++)
522 			pci_read_config_dword(dev, i * 4, &buffer[i]);
523 	}
524 	return 0;
525 }
526 
527 static inline int
pci_restore_state(struct pci_dev * dev,u32 * buffer)528 pci_restore_state(struct pci_dev *dev, u32 *buffer)
529 {
530 	int i;
531 
532 	if (buffer) {
533 		for (i = 0; i < 16; i++)
534 			pci_write_config_dword(dev, i * 4, buffer[i]);
535 	}
536 	/*
537 	 * otherwise, write the context information we know from bootup.
538 	 * This works around a problem where warm-booting from Windows
539 	 * combined with a D3(hot)->D0 transition causes PCI config
540 	 * header data to be forgotten.
541 	 */
542 	else {
543 		for (i = 0; i < 6; i ++)
544 			pci_write_config_dword(dev,
545 			                       PCI_BASE_ADDRESS_0 + (i * 4),
546 			                       pci_resource_start(dev, i));
547 		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
548 	}
549 	return 0;
550 }
551 #endif /* PCI power management */
552 
553 /* Old cp0 access macros deprecated in 2.4.19 */
554 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
555 #define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
556 #endif
557 
558 /* Module refcount handled internally in 2.6.x */
559 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
560 #ifndef SET_MODULE_OWNER
561 #define SET_MODULE_OWNER(dev)		do {} while (0)
562 #define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
563 #define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
564 #else
565 #define OLD_MOD_INC_USE_COUNT		do {} while (0)
566 #define OLD_MOD_DEC_USE_COUNT		do {} while (0)
567 #endif
568 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
569 #ifndef SET_MODULE_OWNER
570 #define SET_MODULE_OWNER(dev)		do {} while (0)
571 #endif
572 #ifndef MOD_INC_USE_COUNT
573 #define MOD_INC_USE_COUNT			do {} while (0)
574 #endif
575 #ifndef MOD_DEC_USE_COUNT
576 #define MOD_DEC_USE_COUNT			do {} while (0)
577 #endif
578 #define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
579 #define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
580 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
581 
582 #ifndef SET_NETDEV_DEV
583 #define SET_NETDEV_DEV(net, pdev)	do {} while (0)
584 #endif
585 
586 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
587 #ifndef HAVE_FREE_NETDEV
588 #define free_netdev(dev)		kfree(dev)
589 #endif
590 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
591 
592 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
593 /* struct packet_type redefined in 2.6.x */
594 #define af_packet_priv			data
595 #endif
596 
597 /* suspend args */
598 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
599 #define DRV_SUSPEND_STATE_TYPE pm_message_t
600 #else
601 #define DRV_SUSPEND_STATE_TYPE uint32
602 #endif
603 
604 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
605 #define CHECKSUM_HW	CHECKSUM_PARTIAL
606 #endif
607 
/*
 * Control block for a kernel thread managed through the PROC_START /
 * PROC_STOP / PROC_FLUSH_* macros and binary_sema_* helpers below.
 */
typedef struct {
	void	*parent;  /* some external entity that the thread supposed to work for */
	char	*proc_name;	/* thread name passed to kthread_run() */
	struct	task_struct *p_task;	/* task returned by kthread_run() */
	long	thr_pid;	/* pid of p_task; -1 when not running */
	int		prio; /* priority */
	struct	semaphore sema;	/* wake-up semaphore (see binary_sema_up/down) */
	int	terminated;	/* set TRUE by PROC_STOP* to request exit */
	struct	completion completed;	/* signalled by the thread on exit */
	int	flush_ind;	/* set TRUE by PROC_FLUSH_USING_BINARY_SEMA */
	struct	completion flushed;	/* signalled when a flush request finishes */
	spinlock_t	spinlock;	/* protects up_cnt */
	int		up_cnt;	/* binary-semaphore state: 0 or 1 */
} tsk_ctl_t;
622 
623 /* ANDREY: new MACROs to start stop threads(OLD kthread API STYLE) */
624 /* requires  tsk_ctl_t tsk  argument, the caller's priv data is passed in owner ptr */
625 /* note this macro assumes there may be only one context waiting on thread's completion */
626 #ifdef KERNEL_TIMESTAMP
627 extern char *dhd_log_dump_get_timestamp(void);
628 #ifdef SYSTEM_TIMESTAMP
629 extern char* dhd_dbg_get_system_timestamp(void);
630 #define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp(), dhd_dbg_get_system_timestamp()
631 #define PERCENT_S "[%s][%s]"
632 #else
633 #define PRINTF_SYSTEM_TIME dhd_log_dump_get_timestamp()
634 #define PERCENT_S "[%s]"
635 #endif
636 #else
637 #define PRINTF_SYSTEM_TIME ""
638 #define PERCENT_S "%s"
639 #endif
640 #ifndef DHD_LOG_PREFIX
641 #define DHD_LOG_PREFIX "[dhd]"
642 #endif
643 #define DHD_LOG_PREFIXS DHD_LOG_PREFIX" "
644 #ifdef DHD_DEBUG
645 #define	printf_thr(fmt, args...)	printk(PERCENT_S DHD_LOG_PREFIXS fmt, PRINTF_SYSTEM_TIME, ## args)
646 #define DBG_THR(args)		do {printf_thr args;} while (0)
647 #else
648 #define DBG_THR(x)
649 #endif
650 
651 extern unsigned long osl_spin_lock(void *lock);
652 extern void osl_spin_unlock(void *lock, unsigned long flags);
653 
654 #define TSK_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
655 #define TSK_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
656 
binary_sema_down(tsk_ctl_t * tsk)657 static inline bool binary_sema_down(tsk_ctl_t *tsk)
658 {
659 	if (down_interruptible(&tsk->sema) == 0) {
660 		unsigned long flags = 0;
661 		TSK_LOCK(&tsk->spinlock, flags);
662 		if (tsk->up_cnt == 1)
663 			tsk->up_cnt--;
664 		else {
665 			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
666 		}
667 		TSK_UNLOCK(&tsk->spinlock, flags);
668 		return false;
669 	} else
670 		return true;
671 }
672 
binary_sema_up(tsk_ctl_t * tsk)673 static inline bool binary_sema_up(tsk_ctl_t *tsk)
674 {
675 	bool sem_up = false;
676 	unsigned long flags = 0;
677 
678 	TSK_LOCK(&tsk->spinlock, flags);
679 	if (tsk->up_cnt == 0) {
680 		tsk->up_cnt++;
681 		sem_up = true;
682 	} else if (tsk->up_cnt == 1) {
683 		/* dhd_sched_dpc: dpc is alread up! */
684 	} else
685 		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
686 
687 	TSK_UNLOCK(&tsk->spinlock, flags);
688 
689 	if (sem_up)
690 		up(&tsk->sema);
691 
692 	return sem_up;
693 }
694 
695 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
696 #define SMP_RD_BARRIER_DEPENDS(x)
697 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
698 #define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
699 #else
700 #define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
701 #endif
702 
703 #define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
704 { \
705 	sema_init(&((tsk_ctl)->sema), 0); \
706 	init_completion(&((tsk_ctl)->completed)); \
707 	init_completion(&((tsk_ctl)->flushed)); \
708 	(tsk_ctl)->parent = owner; \
709 	(tsk_ctl)->proc_name = name;  \
710 	(tsk_ctl)->terminated = FALSE; \
711 	(tsk_ctl)->flush_ind = FALSE; \
712 	(tsk_ctl)->up_cnt = 0; \
713 	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
714 	if (IS_ERR((tsk_ctl)->p_task)) { \
715 		(tsk_ctl)->thr_pid = -1; \
716 		DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
717 			(tsk_ctl)->proc_name)); \
718 	} else { \
719 		(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
720 		spin_lock_init(&((tsk_ctl)->spinlock)); \
721 		DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
722 			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
723 	}; \
724 }
725 
726 #define PROC_WAIT_TIMEOUT_MSEC	5000 /* 5 seconds */
727 
728 #define PROC_STOP(tsk_ctl) \
729 { \
730 	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
731 	(tsk_ctl)->terminated = TRUE; \
732 	smp_wmb(); \
733 	up(&((tsk_ctl)->sema));	\
734 	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
735 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
736 	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
737 	if (timeout == 0) \
738 		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
739 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
740 	else \
741 		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
742 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
743 	(tsk_ctl)->parent = NULL; \
744 	(tsk_ctl)->proc_name = NULL;  \
745 	(tsk_ctl)->thr_pid = -1; \
746 	(tsk_ctl)->up_cnt = 0; \
747 }
748 
749 #define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
750 { \
751 	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
752 	(tsk_ctl)->terminated = TRUE; \
753 	smp_wmb(); \
754 	binary_sema_up(tsk_ctl);	\
755 	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
756 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
757 	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
758 	if (timeout == 0) \
759 		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
760 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
761 	else \
762 		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
763 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
764 	(tsk_ctl)->parent = NULL; \
765 	(tsk_ctl)->proc_name = NULL;  \
766 	(tsk_ctl)->thr_pid = -1; \
767 }
768 
769 /*
* Flush is non-reentrant, so callers must make sure
771 * there is no race condition.
772 * For safer exit, added wait_for_completion_timeout
773 * with 1 sec timeout.
774 */
775 #define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
776 { \
777 	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
778 	(tsk_ctl)->flush_ind = TRUE; \
779 	smp_wmb(); \
780 	binary_sema_up(tsk_ctl);	\
781 	DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
782 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
783 	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
784 	if (timeout == 0) \
785 		DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
786 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
787 	else \
788 		DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
789 			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
790 }
791 
792 /*  ----------------------- */
793 
794 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
795 #define KILL_PROC(nr, sig) \
796 { \
797 struct task_struct *tsk; \
798 struct pid *pid;    \
799 pid = find_get_pid((pid_t)nr);    \
800 tsk = pid_task(pid, PIDTYPE_PID);    \
801 if (tsk) send_sig(sig, tsk, 1); \
802 }
803 #else
804 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
805 	KERNEL_VERSION(2, 6, 30))
806 #define KILL_PROC(pid, sig) \
807 { \
808 	struct task_struct *tsk; \
809 	tsk = find_task_by_vpid(pid); \
810 	if (tsk) send_sig(sig, tsk, 1); \
811 }
812 #else
813 #define KILL_PROC(pid, sig) \
814 { \
815 	kill_proc(pid, sig, 1); \
816 }
817 #endif
818 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
819 
820 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
821 #include <linux/time.h>
822 #include <linux/wait.h>
823 #else
824 #include <linux/sched.h>
825 
826 #define __wait_event_interruptible_timeout(wq, condition, ret)		\
827 do {									\
828 	wait_queue_t __wait;						\
829 	init_waitqueue_entry(&__wait, current);				\
830 									\
831 	add_wait_queue(&wq, &__wait);					\
832 	for (;;) {							\
833 		set_current_state(TASK_INTERRUPTIBLE);			\
834 		if (condition)						\
835 			break;						\
836 		if (!signal_pending(current)) {				\
837 			ret = schedule_timeout(ret);			\
838 			if (!ret)					\
839 				break;					\
840 			continue;					\
841 		}							\
842 		ret = -ERESTARTSYS;					\
843 		break;							\
844 	}								\
845 	current->state = TASK_RUNNING;					\
846 	remove_wait_queue(&wq, &__wait);				\
847 } while (0)
848 
849 #define wait_event_interruptible_timeout(wq, condition, timeout)	\
850 ({									\
851 	long __ret = timeout;						\
852 	if (!(condition))						\
853 		__wait_event_interruptible_timeout(wq, condition, __ret); \
854 	__ret;								\
855 })
856 
857 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
858 
859 /*
860 For < 2.6.24, wl creates its own netdev but doesn't
861 align the priv area like the genuine alloc_netdev().
862 Since netdev_priv() always gives us the aligned address, it will
863 not match our unaligned address for < 2.6.24
864 */
865 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
866 #define DEV_PRIV(dev)	(dev->priv)
867 #else
868 #define DEV_PRIV(dev)	netdev_priv(dev)
869 #endif
870 
871 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
872 #define WL_ISR(i, d, p)         wl_isr((i), (d))
873 #else
874 #define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
875 #endif  /* < 2.6.20 */
876 
877 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
878 #define netdev_priv(dev) dev->priv
879 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
880 
881 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
882 #define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
883 #else
884 #define CAN_SLEEP()	(FALSE)
885 #endif
886 
887 #define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
888 
889 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 170))
890 #define RANDOM32	get_random_u32
891 #define RANDOM_BYTES    get_random_bytes
892 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
893 #define RANDOM32	prandom_u32
894 #define RANDOM_BYTES    prandom_bytes
895 #else
896 #define RANDOM32	random32
897 #define RANDOM_BYTES    get_random_bytes
898 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
899 
900 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
901 #define SRANDOM32(entropy)	prandom_seed(entropy)
902 #else
903 #define SRANDOM32(entropy)	srandom32(entropy)
904 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
905 
906 /*
907  * Overide latest kfifo functions with
908  * older version to work on older kernels
909  */
910 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
911 #define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
912 #define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
913 #define kfifo_esize(a)				1
914 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
915 	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&	!defined(WL_COMPAT_WIRELESS)
916 #define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
917 #define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
918 #define kfifo_esize(a)				1
919 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
920 
921 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
922 #pragma GCC diagnostic pop
923 #endif
924 
925 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
926 #include <linux/fs.h>
/* Backport of file_inode() (added in 3.9): resolve a file's inode
 * through its dentry.
 */
static inline struct inode *file_inode(const struct file *f)
{
	return f->f_dentry->d_inode;
}
931 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
932 
933 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
934 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
935 
936 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
937 #define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
938 #define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
939 int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
940 #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
941 #define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
942 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
943 
944 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
945 #define netdev_tx_t int
946 #endif
947 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
948 #define complete_and_exit(a, b) kthread_complete_and_exit(a, b)
949 #else
950 #define	dev_addr_set(net, addr) memcpy(net->dev_addr, addr, ETHER_ADDR_LEN)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) */
952 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
953 #define netif_rx_ni(skb) netif_rx(skb)
954 #define pci_free_consistent(a, b, c, d) dma_free_coherent(&((struct pci_dev *)a)->dev, b, c, d)
955 #define pci_map_single(a, b, c, d) dma_map_single(&((struct pci_dev *)a)->dev, b, c, d)
956 #define pci_unmap_single(a, b, c, d) dma_unmap_single(&((struct pci_dev *)a)->dev, b, c, d)
957 #define pci_dma_mapping_error(a, b) dma_mapping_error(&((struct pci_dev *)a)->dev, b)
958 #ifndef PCI_DMA_TODEVICE
959 #define	PCI_DMA_TODEVICE	1
960 #define	PCI_DMA_FROMDEVICE	2
961 #endif
962 #endif
963 
964 #ifdef ANDROID_BKPORT
965 #if (ANDROID_VERSION >= 13) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 41))
966 #define ANDROID13_KERNEL515_BKPORT
967 #define CFG80211_BKPORT_MLO
968 #endif /* ANDROID_VERSION >= 13 && KERNEL >= 5.15.41 */
969 #endif /* ANDROID_BKPORT */
970 
971 #endif /* _linuxver_h_ */
972