/*
 * Linux-specific abstractions to gain some independence from Linux kernel versions.
 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linuxver.h 646730 2016-06-30 13:01:49Z $
 */

#ifndef _linuxver_h_
#define _linuxver_h_

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
#endif // endif

#include <typedefs.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#include <linux/config.h>
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif // endif
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
#include <linux/kconfig.h>
#endif // endif
#include <linux/module.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
#ifdef __UNDEF_NO_VERSION__
#undef __NO_VERSION__
#else
#define __NO_VERSION__
#endif // endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_) \
	MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif // endif
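
/*
 * Usage sketch for the module_param() shim above (illustrative; the
 * parameter name msglevel is hypothetical, not part of this header):
 *
 *	static int msglevel = 0;
 *	module_param(msglevel, int, 0);
 *
 * On pre-2.5 kernels this expands to MODULE_PARM(msglevel, "i"); note
 * that the shim ignores _type_ and _perm_ and always declares an "i"
 * (integer) parameter.
 */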

/* linux/malloc.h is deprecated, use linux/slab.h instead. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/slab.h>
#endif // endif

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
#undef IP_TOS
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
#include <asm/io.h>

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
#include <linux/workqueue.h>
#else
#include <linux/tqueue.h>
#ifndef work_struct
#define work_struct tq_struct
#endif // endif
#ifndef INIT_WORK
#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
#endif // endif
#ifndef schedule_work
#define schedule_work(_work) schedule_task((_work))
#endif // endif
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
#endif // endif
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
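
/*
 * Usage sketch for the work compatibility layer (illustrative only;
 * my_work and my_work_fn are hypothetical). MY_INIT_WORK(), defined
 * further below, additionally papers over the 2.6.19 INIT_WORK()
 * prototype change:
 *
 *	static struct work_struct my_work;
 *
 *	MY_INIT_WORK(&my_work, my_work_fn);
 *	schedule_work(&my_work);
 *	flush_scheduled_work();
 */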

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define DAEMONIZE(a) do { \
		allow_signal(SIGKILL); \
		allow_signal(SIGTERM); \
	} while (0)
#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
#define DAEMONIZE(a) daemonize(a); \
	allow_signal(SIGKILL); \
	allow_signal(SIGTERM);
#else /* Linux 2.4 (w/o preemption patch) */
#define RAISE_RX_SOFTIRQ() \
	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a) daemonize(); \
	do { if (a) \
		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
	} while (0);
#endif /* LINUX_VERSION_CODE */
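
/*
 * Usage sketch (illustrative; my_thread_fn is hypothetical). A kernel
 * thread calls DAEMONIZE() once on entry; on >= 3.8.0 daemonize() no
 * longer exists, so only SIGKILL/SIGTERM delivery is enabled, which
 * lets a controlling context signal the thread to stop:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		DAEMONIZE("my_thread");
 *		while (!signal_pending(current)) {
 *			// ... wait for and handle work ...
 *		}
 *		return 0;
 *	}
 */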

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
#else
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
	(RHEL_MAJOR == 5))
/* Exclude RHEL 5 */
typedef void (*work_func_t)(void *work);
#endif // endif
#endif /* >= 2.6.19 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* Some distributions have their own 2.6.x compatibility layers */
#ifndef IRQ_NONE
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
#endif // endif
#else
typedef irqreturn_t (*FN_ISR)(int irq, void *dev_id, struct pt_regs *ptregs);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define IRQF_SHARED SA_SHIRQ
#endif /* < 2.6.18 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif // endif
#endif /* < 2.6.17 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#include <linux/sched.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
#include <linux/signal.h>
#include <linux/sched/signal.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#include <linux/sched/rt.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#endif // endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif // endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */

#ifndef __exit
#define __exit
#endif // endif
#ifndef __devexit
#define __devexit
#endif // endif
#ifndef __devinit
# if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
# define __devinit __init
# else
/* All devices are hotpluggable since Linux 3.8.0 */
# define __devinit
# endif
#endif /* !__devinit */
#ifndef __devinitdata
#define __devinitdata
#endif // endif
#ifndef __devexit_p
#define __devexit_p(x) x
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))

#define pci_get_drvdata(dev) (dev)->sysdata
#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)

/*
 * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
 */

struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem IDs or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class, subclass, prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};

struct pci_driver {
	struct list_head node;
	char *name;
	const struct pci_device_id *id_table;	/* NULL if wants all devices */
	int (*probe)(struct pci_dev *dev,
	             const struct pci_device_id *id); /* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
						 * capable driver)
						 */
	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
	void (*resume)(struct pci_dev *dev);	/* Device woken up */
};

#define MODULE_DEVICE_TABLE(type, name)
#define PCI_ANY_ID (~0)

/* compatpci.c */
#define pci_module_init pci_register_driver
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);

#endif /* PCI registration */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
#ifdef MODULE
#define module_init(x) int init_module(void) { return x(); }
#define module_exit(x) void cleanup_module(void) { x(); }
#else
#define module_init(x) __initcall(x);
#define module_exit(x) __exitcall(x);
#endif // endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#else
#undef WL_USE_NETDEV_OPS
#endif // endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
#define WL_CONFIG_RFKILL
#else
#undef WL_CONFIG_RFKILL
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
#define pci_enable_device(dev) do { } while (0)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
#define net_device device
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))

/*
 * DMA mapping
 *
 * See linux/Documentation/DMA-mapping.txt
 */

#ifndef PCI_DMA_TODEVICE
#define PCI_DMA_TODEVICE	1
#define PCI_DMA_FROMDEVICE	2
#endif // endif

typedef u32 dma_addr_t;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
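
/*
 * Worked example: on a 4 KB-page system (PAGE_SHIFT == 12),
 * get_order(4096) == 0, get_order(4097) == 1, get_order(16384) == 2;
 * i.e. it returns the smallest n such that size <= (PAGE_SIZE << n).
 */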

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}

static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
#define pci_unmap_single(cookie, address, size, dir)

#endif /* DMA mapping */
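
/*
 * Usage sketch for the pre-2.3.42 DMA shims (illustrative; my_pdev is
 * hypothetical). On these kernels there is no IOMMU support, so
 * "mapping" is simply virt_to_bus():
 *
 *	dma_addr_t pa;
 *	void *va = pci_alloc_consistent(my_pdev, 4096, &pa);
 *	if (va != NULL) {
 *		// ... program the device with pa, access va from the CPU ...
 *		pci_free_consistent(my_pdev, 4096, va, pa);
 *	}
 */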

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
typedef struct timer_list timer_list_compat_t;

#define init_timer_compat(timer_compat, cb, priv) \
	init_timer(timer_compat); \
	(timer_compat)->data = (ulong)priv; \
	(timer_compat)->function = cb
#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
#define timer_expires(timer_compat) (timer_compat)->expires

#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */

typedef struct timer_list_compat {
	struct timer_list timer;
	void *arg;
	void (*callback)(ulong arg);
} timer_list_compat_t;

extern void timer_cb_compat(struct timer_list *tl);

#define init_timer_compat(timer_compat, cb, priv) \
	(timer_compat)->arg = priv; \
	(timer_compat)->callback = cb; \
	timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
#define timer_expires(timer_compat) (timer_compat)->timer.expires

#define del_timer(t) del_timer(&((t)->timer))
#define del_timer_sync(t) del_timer_sync(&((t)->timer))
#define timer_pending(t) timer_pending(&((t)->timer))
#define add_timer(t) add_timer(&((t)->timer))
#define mod_timer(t, j) mod_timer(&((t)->timer), j)

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
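
/*
 * Usage sketch for the timer compatibility layer (illustrative;
 * my_timer_cb and my_ctx are hypothetical). On >= 4.15 struct
 * timer_list lost its ->data member, so the wrapper carries the
 * callback argument itself and timer_cb_compat() (defined elsewhere)
 * dispatches to ->callback(->arg):
 *
 *	static void my_timer_cb(ulong arg) { ... }
 *
 *	timer_list_compat_t t;
 *	init_timer_compat(&t, my_timer_cb, &my_ctx);
 *	timer_expires(&t) = jiffies + HZ;
 *	add_timer(&t);
 *	...
 *	del_timer_sync(&t);
 */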

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))

#define dev_kfree_skb_any(a)	dev_kfree_skb(a)
#define netif_down(dev)		do { (dev)->start = 0; } while (0)

/* pcmcia-cs provides its own netdevice compatibility layer */
#ifndef _COMPAT_NETDEVICE_H

/*
 * SoftNet
 *
 * For pre-softnet kernels we need to tell the upper layer not to
 * re-enter start_xmit() while we are in there. However softnet
 * guarantees not to enter while we are in there so there is no need
 * to do the netif_stop_queue() dance unless the transmit queue really
 * gets stuck. This should also improve performance according to tests
 * done by Aman Singla.
 */

#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
#define netif_wake_queue(dev) \
	do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)

static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}

#define netif_queue_stopped(dev)	(dev)->tbusy
#define netif_running(dev)		(dev)->start

#endif /* _COMPAT_NETDEVICE_H */

#define netif_device_attach(dev)	netif_start_queue(dev)
#define netif_device_detach(dev)	netif_stop_queue(dev)

/* 2.4.x renamed bottom halves to tasklets */
#define tasklet_struct tq_struct

static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
                                void (*func)(unsigned long),
                                unsigned long data)
{
	tasklet->next = NULL;
	tasklet->sync = 0;
	tasklet->routine = (void (*)(void *))func;
	tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet)	do {} while (0)
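
/*
 * Usage sketch for the tasklet shim (illustrative; my_tasklet_fn and
 * ctx are hypothetical). On pre-2.3.43 kernels a "tasklet" is
 * emulated with an immediate-queue task:
 *
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_tasklet_fn, (unsigned long)ctx);
 *	tasklet_schedule(&my_tasklet);	// runs my_tasklet_fn from IMMEDIATE_BH
 */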

/* 2.4.x introduced del_timer_sync() */
#define del_timer_sync(timer) del_timer(timer)

#else

#define netif_down(dev)

#endif /* SoftNet */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))

/*
 * Emit code to initialise a tq_struct's routine and data pointers
 */
#define PREPARE_TQUEUE(_tq, _routine, _data) \
	do { \
		(_tq)->routine = _routine; \
		(_tq)->data = _data; \
	} while (0)

/*
 * Emit code to initialise all of a tq_struct
 */
#define INIT_TQUEUE(_tq, _routine, _data) \
	do { \
		INIT_LIST_HEAD(&(_tq)->list); \
		(_tq)->sync = 0; \
		PREPARE_TQUEUE((_tq), (_routine), (_data)); \
	} while (0)

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */

/* Power management related macros & routines */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
#define PCI_SAVE_STATE(a, b)	pci_save_state(a)
#define PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
#else
#define PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
#define PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
static inline int
pci_save_state(struct pci_dev *dev, u32 *buffer)
{
	int i;
	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_read_config_dword(dev, i * 4, &buffer[i]);
	}
	return 0;
}

static inline int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	/*
	 * otherwise, write the context information we know from bootup.
	 * This works around a problem where warm-booting from Windows
	 * combined with a D3(hot)->D0 transition causes PCI config
	 * header data to be forgotten.
	 */
	else {
		for (i = 0; i < 6; i++)
			pci_write_config_dword(dev,
			                       PCI_BASE_ADDRESS_0 + (i * 4),
			                       pci_resource_start(dev, i));
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}
#endif /* PCI power management */
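
/*
 * Usage sketch (illustrative; my_pdev and my_cfg_space are
 * hypothetical). The two-argument wrappers hide the 2.6.10 change
 * that dropped the buffer argument from pci_save_state():
 *
 *	static u32 my_cfg_space[16];
 *
 *	PCI_SAVE_STATE(my_pdev, my_cfg_space);		// on suspend
 *	...
 *	PCI_RESTORE_STATE(my_pdev, my_cfg_space);	// on resume
 */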

/* Old cp0 access macros deprecated in 2.4.19 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
#endif // endif

/* Module refcount handled internally in 2.6.x */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)	do {} while (0)
#define OLD_MOD_INC_USE_COUNT	MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT	MOD_DEC_USE_COUNT
#else
#define OLD_MOD_INC_USE_COUNT	do {} while (0)
#define OLD_MOD_DEC_USE_COUNT	do {} while (0)
#endif // endif
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)	do {} while (0)
#endif // endif
#ifndef MOD_INC_USE_COUNT
#define MOD_INC_USE_COUNT	do {} while (0)
#endif // endif
#ifndef MOD_DEC_USE_COUNT
#define MOD_DEC_USE_COUNT	do {} while (0)
#endif // endif
#define OLD_MOD_INC_USE_COUNT	MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT	MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev)	kfree(dev)
#endif // endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* struct packet_type redefined in 2.6.x */
#define af_packet_priv	data
#endif // endif

/* suspend args */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#define DRV_SUSPEND_STATE_TYPE uint32
#endif // endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define CHECKSUM_HW CHECKSUM_PARTIAL
#endif // endif

typedef struct {
	void *parent;	/* the external entity that the thread is supposed to work for */
	char *proc_name;
	struct task_struct *p_task;
	long thr_pid;
	int prio;	/* priority */
	struct semaphore sema;
	int terminated;
	struct completion completed;
	int flush_ind;
	struct completion flushed;
	spinlock_t spinlock;
	int up_cnt;
} tsk_ctl_t;

/* The PROC_* macros below require a tsk_ctl_t argument; the caller's priv data is passed
 * in the owner ptr.
 * Note: these macros assume there may be only one context waiting on the thread's completion.
 */
#ifdef DHD_DEBUG
#define DBG_THR(x) printk x
#else
#define DBG_THR(x)
#endif // endif

static inline bool binary_sema_down(tsk_ctl_t *tsk)
{
	if (down_interruptible(&tsk->sema) == 0) {
		unsigned long flags = 0;
		spin_lock_irqsave(&tsk->spinlock, flags);
		if (tsk->up_cnt == 1)
			tsk->up_cnt--;
		else {
			DBG_THR(("dhd_dpc_thread: unexpected up_cnt %d\n", tsk->up_cnt));
		}
		spin_unlock_irqrestore(&tsk->spinlock, flags);
		return false;
	} else
		return true;
}

static inline bool binary_sema_up(tsk_ctl_t *tsk)
{
	bool sem_up = false;
	unsigned long flags = 0;

	spin_lock_irqsave(&tsk->spinlock, flags);
	if (tsk->up_cnt == 0) {
		tsk->up_cnt++;
		sem_up = true;
	} else if (tsk->up_cnt == 1) {
		/* dhd_sched_dpc: dpc is already up! */
	} else
		DBG_THR(("dhd_sched_dpc: unexpected up_cnt %d!\n", tsk->up_cnt));

	spin_unlock_irqrestore(&tsk->spinlock, flags);

	if (sem_up)
		up(&tsk->sema);

	return sem_up;
}
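
/*
 * Usage sketch (illustrative; ctl and handle_signal() are
 * hypothetical). binary_sema_up()/binary_sema_down() clamp the
 * semaphore count to {0, 1}, so repeated wake-ups from the scheduling
 * side collapse into a single wake-up on the thread side:
 *
 *	// producer (e.g. a dpc scheduling path):
 *	binary_sema_up(&ctl);		// no-op if already signalled
 *
 *	// consumer (the kthread):
 *	if (binary_sema_down(&ctl))	// returns true if interrupted by a signal
 *		handle_signal();
 */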

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)))
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
#endif // endif

#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
{ \
	sema_init(&((tsk_ctl)->sema), 0); \
	init_completion(&((tsk_ctl)->completed)); \
	init_completion(&((tsk_ctl)->flushed)); \
	(tsk_ctl)->parent = owner; \
	(tsk_ctl)->proc_name = name; \
	(tsk_ctl)->terminated = FALSE; \
	(tsk_ctl)->flush_ind = FALSE; \
	(tsk_ctl)->up_cnt = 0; \
	(tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char *)name); \
	if (IS_ERR((tsk_ctl)->p_task)) { \
		(tsk_ctl)->thr_pid = -1; \
		DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
			(tsk_ctl)->proc_name)); \
	} else { \
		(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
		spin_lock_init(&((tsk_ctl)->spinlock)); \
		DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	} \
}

#define PROC_WAIT_TIMEOUT_MSEC	5000 /* 5 seconds */

#define PROC_STOP(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	up(&((tsk_ctl)->sema)); \
	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
		(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->parent = NULL; \
	(tsk_ctl)->proc_name = NULL; \
	(tsk_ctl)->thr_pid = -1; \
	(tsk_ctl)->up_cnt = 0; \
}

#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	binary_sema_up(tsk_ctl); \
	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
		(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->parent = NULL; \
	(tsk_ctl)->proc_name = NULL; \
	(tsk_ctl)->thr_pid = -1; \
}

/*
 * Flush is non-reentrant, so callers must make sure
 * there is no race condition.
 * For safer exit, wait_for_completion_timeout() is used with a
 * PROC_WAIT_TIMEOUT_MSEC timeout.
 */
#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->flush_ind = TRUE; \
	smp_wmb(); \
	binary_sema_up(tsk_ctl); \
	DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
		(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
}
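
/*
 * Lifecycle sketch for the tsk_ctl_t helpers (illustrative; my_thread,
 * owner and ctl are hypothetical). The thread loops on the binary
 * semaphore and signals 'completed' when it sees 'terminated':
 *
 *	static int my_thread(void *data)
 *	{
 *		tsk_ctl_t *tsk = (tsk_ctl_t *)data;
 *		while (!binary_sema_down(tsk)) {
 *			SMP_RD_BARRIER_DEPENDS();
 *			if (tsk->terminated)
 *				break;
 *			// ... do work for tsk->parent ...
 *		}
 *		complete_and_exit(&tsk->completed, 0);
 *	}
 *
 *	PROC_START(my_thread, owner, &ctl, 0, "my_thread");
 *	...
 *	PROC_STOP_USING_BINARY_SEMA(&ctl);
 */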

/* ----------------------- */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
/* send_sig declaration moved */
#include <linux/sched/signal.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */

#define KILL_PROC(nr, sig) \
{ \
	struct task_struct *tsk; \
	struct pid *pid; \
	pid = find_get_pid((pid_t)nr); \
	tsk = pid_task(pid, PIDTYPE_PID); \
	if (tsk) send_sig(sig, tsk, 1); \
}
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 30))
#define KILL_PROC(pid, sig) \
{ \
	struct task_struct *tsk; \
	tsk = find_task_by_vpid(pid); \
	if (tsk) send_sig(sig, tsk, 1); \
}
#else
#define KILL_PROC(pid, sig) \
{ \
	kill_proc(pid, sig, 1); \
}
#endif // endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#include <linux/time.h>
#include <linux/wait.h>
#else
#include <linux/sched.h>

#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	wait_queue_t __wait; \
	init_waitqueue_entry(&__wait, current); \
	\
	add_wait_queue(&wq, &__wait); \
	for (;;) { \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			ret = schedule_timeout(ret); \
			if (!ret) \
				break; \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	current->state = TASK_RUNNING; \
	remove_wait_queue(&wq, &__wait); \
} while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})

#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
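
/*
 * Usage sketch (illustrative; my_wq and my_flag are hypothetical).
 * The pre-2.6 fallback above mirrors the in-kernel semantics:
 * 0 on timeout, -ERESTARTSYS if interrupted by a signal, and the
 * remaining jiffies if the condition became true in time:
 *
 *	long left = wait_event_interruptible_timeout(my_wq, my_flag != 0, HZ);
 *	if (left == 0)
 *		;	// timed out
 *	else if (left < 0)
 *		;	// interrupted by a signal
 */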

/*
 * For < 2.6.24, wl creates its own netdev but doesn't
 * align the priv area like the genuine alloc_netdev().
 * Since netdev_priv() always gives us the aligned address, it will
 * not match our unaligned address for < 2.6.24.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DEV_PRIV(dev)	(dev->priv)
#else
#define DEV_PRIV(dev)	netdev_priv(dev)
#endif // endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define WL_ISR(i, d, p)	wl_isr((i), (d))
#else
#define WL_ISR(i, d, p)	wl_isr((i), (d), (p))
#endif /* >= 2.6.20 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define netdev_priv(dev) dev->priv
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
#else
#define CAN_SLEEP()	(FALSE)
#endif // endif

#define KMALLOC_FLAG	(CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC)
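
/*
 * Usage sketch (buf and len are placeholders): KMALLOC_FLAG picks
 * GFP_KERNEL only when sleeping is provably safe (process context,
 * IRQs enabled) and falls back to GFP_ATOMIC otherwise, so it is
 * usable in paths that may be entered from either context:
 *
 *	void *buf = kmalloc(len, KMALLOC_FLAG);
 */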

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define RANDOM32	prandom_u32
#define RANDOM_BYTES	prandom_bytes
#else
#define RANDOM32	random32
#define RANDOM_BYTES	get_random_bytes
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define SRANDOM32(entropy)	prandom_seed(entropy)
#else
#define SRANDOM32(entropy)	srandom32(entropy)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */

/*
 * Override the latest kfifo functions with
 * older versions to work on older kernels
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
#define kfifo_esize(a)				1
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
#define kfifo_esize(a)				1
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
static inline struct inode *file_inode(const struct file *f)
{
	return f->f_dentry->d_inode;
}
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#define vfs_write(fp, buf, len, pos)	kernel_write(fp, buf, len, pos)
#define vfs_read(fp, buf, len, pos)	kernel_read(fp, buf, len, pos)
int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
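
/*
 * Usage sketch (illustrative; fp and buf are placeholders).
 * kernel_read_compat() keeps the pre-4.14 kernel_read() argument
 * order (file, offset, addr, count) on both sides of the version
 * split; on >= 4.14 it is implemented elsewhere on top of the new
 * kernel_read(file, buf, count, &pos) API:
 *
 *	char buf[64];
 *	int n = kernel_read_compat(fp, 0, buf, sizeof(buf));
 */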

#endif /* _linuxver_h_ */