/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/namei.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
#include <linux/kref.h>
#endif
/* #include <linux/smp_lock.h> */
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#include <asm/semaphore.h>
#else
#include <linux/semaphore.h>
#endif
#include <linux/sem.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/addrconf.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
#include <linux/interrupt.h>	/* for struct tasklet_struct */
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/vmalloc.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
#include <uapi/linux/sched/types.h>
#endif

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 5, 41))
#include <linux/tqueue.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <uapi/linux/limits.h>
#else
#include <linux/limits.h>
#endif

#ifdef RTK_DMP_PLATFORM
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
#include <linux/pageremap.h>
#endif
#include <asm/io.h>
#endif

#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif

/* Monitor mode */
#include <net/ieee80211_radiotap.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
#include <linux/ieee80211.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
#define CONFIG_IEEE80211_HT_ADDT_INFO
#endif

#ifdef CONFIG_IOCTL_CFG80211
/* #include <linux/ieee80211.h> */
#include <net/cfg80211.h>
#else
#ifdef CONFIG_REGD_SRC_FROM_OS
#error "CONFIG_REGD_SRC_FROM_OS requires CONFIG_IOCTL_CFG80211"
#endif
#endif /* CONFIG_IOCTL_CFG80211 */


#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif /* CONFIG_HAS_EARLYSUSPEND */

#ifdef CONFIG_EFUSE_CONFIG_FILE
#include <linux/fs.h>
#endif

#ifdef CONFIG_USB_HCI
#include <linux/usb.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
#include <linux/usb_ch9.h>
#else
#include <linux/usb/ch9.h>
#endif
#endif

#ifdef CONFIG_BT_COEXIST_SOCKET_TRX
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/netlink.h>
#endif /* CONFIG_BT_COEXIST_SOCKET_TRX */

#ifdef CONFIG_USB_HCI
typedef struct urb *PURB;
#endif

#if defined(CONFIG_RTW_GRO) && (!defined(CONFIG_RTW_NAPI))

#error "Enable NAPI before enabling GRO"

#endif


#if (KERNEL_VERSION(2, 6, 29) > LINUX_VERSION_CODE && defined(CONFIG_RTW_NAPI))

#undef CONFIG_RTW_NAPI
/*#warning "Linux kernel version too old to support NAPI (should be newer than 2.6.29)"*/

#endif

#if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE && defined(CONFIG_RTW_GRO))

#undef CONFIG_RTW_GRO
/*#warning "Linux kernel version too old to support GRO (should be newer than 2.6.33)"*/

#endif

typedef struct semaphore _sema;
typedef spinlock_t _lock;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
typedef struct mutex _mutex;
#else
typedef struct semaphore _mutex;
#endif
struct rtw_timer_list {
	struct timer_list timer;
	void (*function)(void *);
	void *arg;
};

typedef struct rtw_timer_list _timer;
typedef struct completion _completion;

struct __queue {
	struct list_head queue;
	_lock lock;
};

typedef struct sk_buff _pkt;
typedef unsigned char _buffer;

typedef struct __queue _queue;
typedef struct list_head _list;

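/*
 * Illustrative sketch (not part of this header's API): a _queue pairs a
 * list_head with the _lock guarding it, so callers take the lock around any
 * list manipulation. The element name "frame" and its embedded list member
 * are hypothetical and only show the intended pattern:
 *
 *	_queue free_queue;
 *
 *	spin_lock_init(&free_queue.lock);
 *	INIT_LIST_HEAD(&free_queue.queue);
 *
 *	spin_lock_bh(&free_queue.lock);
 *	list_add_tail(&frame->list, &free_queue.queue);
 *	spin_unlock_bh(&free_queue.lock);
 */
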
/* hlist */
typedef struct hlist_head rtw_hlist_head;
typedef struct hlist_node rtw_hlist_node;

/* RCU */
typedef struct rcu_head rtw_rcu_head;
#define rtw_rcu_dereference(p) rcu_dereference((p))
#define rtw_rcu_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define rtw_rcu_assign_pointer(p, v) rcu_assign_pointer((p), (v))
#define rtw_rcu_read_lock() rcu_read_lock()
#define rtw_rcu_read_unlock() rcu_read_unlock()
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34))
#define rtw_rcu_access_pointer(p) rcu_access_pointer(p)
#endif

/* rhashtable */
#include "../os_dep/linux/rtw_rhashtable.h"

typedef int _OS_STATUS;
/* typedef u32 _irqL; */
typedef unsigned long _irqL;
typedef struct net_device *_nic_hdl;

typedef void *_thread_hdl_;
typedef int thread_return;
typedef void *thread_context;

typedef void timer_hdl_return;
typedef void *timer_hdl_context;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
typedef struct work_struct _workitem;
#else
typedef struct tq_struct _workitem;
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#endif

typedef unsigned long systime;
typedef ktime_t sysptime;
typedef struct tasklet_struct _tasklet;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
/* Ported from the Linux kernel, for compatibility with older kernels. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

__inline static void rtw_list_delete(_list *plist)
{
	list_del_init(plist);
}

__inline static _list *get_next(_list *list)
{
	return list->next;
}

#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))

#define rtw_list_first_entry(ptr, type, member) list_first_entry(ptr, type, member)

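/*
 * Illustrative sketch: LIST_CONTAINOR recovers the enclosing object from an
 * embedded _list node, like the kernel's container_of(). The struct name
 * below is hypothetical:
 *
 *	struct xmit_frame {
 *		_list list;
 *		...
 *	};
 *
 *	_list *plist = get_next(get_list_head(pqueue));
 *	struct xmit_frame *pframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
 */
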
#define rtw_hlist_for_each_entry(pos, head, member) hlist_for_each_entry(pos, head, member)
#define rtw_hlist_for_each_safe(pos, n, head) hlist_for_each_safe(pos, n, head)
#define rtw_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, head, member)
#else
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, np, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, node, head, member)
#endif

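/*
 * Illustrative sketch: the rtw_hlist_* iteration wrappers absorb the 3.9
 * kernel change that dropped the extra node argument from
 * hlist_for_each_entry*(). The struct and variable names are hypothetical:
 *
 *	struct sta_entry {
 *		rtw_hlist_node node;
 *		...
 *	};
 *
 *	rtw_hlist_head map;
 *	struct sta_entry *pos;
 *
 *	rtw_hlist_for_each_entry(pos, &map, node)
 *		... visit pos ...
 */
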
__inline static void _enter_critical(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_lock_bh(plock);
}

__inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_unlock_bh(plock);
}

__inline static void enter_critical_bh(_lock *plock)
{
	spin_lock_bh(plock);
}

__inline static void exit_critical_bh(_lock *plock)
{
	spin_unlock_bh(plock);
}

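/*
 * Illustrative sketch: the _enter_critical*() wrappers map directly onto the
 * kernel spinlock primitives. The _irqL argument holds the saved IRQ flags
 * for the irqsave variants and is ignored by the _bh variants:
 *
 *	_lock lock;
 *	_irqL irqL;
 *
 *	spin_lock_init(&lock);
 *
 *	_enter_critical_bh(&lock, &irqL);
 *	... touch data shared with softirq/tasklet context ...
 *	_exit_critical_bh(&lock, &irqL);
 */
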
__inline static int _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	/* mutex_lock(pmutex); */
	ret = mutex_lock_interruptible(pmutex);
#else
	ret = down_interruptible(pmutex);
#endif
	return ret;
}


__inline static int _enter_critical_mutex_lock(_mutex *pmutex, _irqL *pirqL)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_lock(pmutex);
#else
	down(pmutex);
#endif
	return ret;
}

__inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_unlock(pmutex);
#else
	up(pmutex);
#endif
}

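/*
 * Illustrative sketch: _enter_critical_mutex() is interruptible and returns
 * the result of mutex_lock_interruptible()/down_interruptible(), so callers
 * should check it; _enter_critical_mutex_lock() always blocks and returns 0.
 * The variable names are hypothetical and the init call assumes _mutex is
 * struct mutex (kernel >= 2.6.37):
 *
 *	_mutex ioctl_mutex;
 *	_irqL irqL;
 *
 *	mutex_init(&ioctl_mutex);
 *	if (_enter_critical_mutex(&ioctl_mutex, &irqL) == 0) {
 *		... critical section, may sleep ...
 *		_exit_critical_mutex(&ioctl_mutex, &irqL);
 *	}
 */
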
__inline static _list *get_list_head(_queue *queue)
{
	return &(queue->queue);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
static inline void timer_hdl(struct timer_list *in_timer)
#else
static inline void timer_hdl(unsigned long cntx)
#endif
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	_timer *ptimer = from_timer(ptimer, in_timer, timer);
#else
	_timer *ptimer = (_timer *)cntx;
#endif
	ptimer->function(ptimer->arg);
}

__inline static void _init_timer(_timer *ptimer, _nic_hdl nic_hdl, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	timer_setup(&ptimer->timer, timer_hdl, 0);
#else
	/* setup_timer(ptimer, pfunc, (u32)cntx); */
	ptimer->timer.function = timer_hdl;
	ptimer->timer.data = (unsigned long)ptimer;
	init_timer(&ptimer->timer);
#endif
}

__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
	mod_timer(&ptimer->timer, (jiffies + (delay_time * HZ / 1000)));
}

__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	*bcancelled = del_timer_sync(&ptimer->timer) == 1 ? 1 : 0;
}

__inline static void _cancel_timer_async(_timer *ptimer)
{
	del_timer(&ptimer->timer);
}

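/*
 * Illustrative sketch: _init_timer() stashes the callback and its argument in
 * struct rtw_timer_list so that timer_hdl() can dispatch them on both the old
 * (data-based) and the new (timer_setup-based) kernel timer APIs. The handler
 * and variable names are hypothetical:
 *
 *	static void scan_timeout_hdl(void *ctx)
 *	{
 *		... ctx is the pointer passed as cntx at init time ...
 *	}
 *
 *	_timer scan_timer;
 *
 *	_init_timer(&scan_timer, pnetdev, scan_timeout_hdl, padapter);
 *	_set_timer(&scan_timer, 1000);		(delay in ms, converted to jiffies)
 *	_cancel_timer_async(&scan_timer);
 */
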
static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
	INIT_WORK(pwork, pfunc);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	INIT_WORK(pwork, pfunc, pwork);
#else
	INIT_TQUEUE(pwork, pfunc, pwork);
#endif
}

__inline static void _set_workitem(_workitem *pwork)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	schedule_work(pwork);
#else
	schedule_task(pwork);
#endif
}

__inline static void _cancel_workitem_sync(_workitem *pwork)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	cancel_work_sync(pwork);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	flush_scheduled_work();
#else
	flush_scheduled_tasks();
#endif
}
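
/*
 * Illustrative sketch: on current kernels _init_workitem() boils down to
 * INIT_WORK(), so the handler has the work_struct prototype and the owning
 * context is usually recovered with container_of(). Names are hypothetical:
 *
 *	static void led_work_hdl(struct work_struct *work)
 *	{
 *		struct led_priv *pled = container_of(work, struct led_priv, work);
 *		...
 *	}
 *
 *	_init_workitem(&pled->work, led_work_hdl, NULL);
 *	_set_workitem(&pled->work);
 *	_cancel_workitem_sync(&pled->work);
 */
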
/*
 * Global Mutex: can only be used at PASSIVE level.
 */

#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)					\
{										\
	while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1) {		\
		atomic_dec((atomic_t *)&(_MutexCounter));			\
		msleep(10);							\
	}									\
}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)					\
{										\
	atomic_dec((atomic_t *)&(_MutexCounter));				\
}

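/*
 * Illustrative sketch: the "global mutex" is a busy-wait on an integer
 * counter treated as an atomic_t, which is why it may only be used in
 * process (PASSIVE) context where msleep() is allowed. The counter name is
 * hypothetical:
 *
 *	static int wifi_global_cnt;
 *
 *	ACQUIRE_GLOBAL_MUTEX(wifi_global_cnt);
 *	... exclusive section, may sleep ...
 *	RELEASE_GLOBAL_MUTEX(wifi_global_cnt);
 */
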
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
#else
	return netif_queue_stopped(pnetdev);
#endif
}

static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_wake_all_queues(pnetdev);
#else
	netif_wake_queue(pnetdev);
#endif
}

static inline void rtw_netif_start_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_start_all_queues(pnetdev);
#else
	netif_start_queue(pnetdev);
#endif
}

static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_stop_all_queues(pnetdev);
#else
	netif_stop_queue(pnetdev);
#endif
}
static inline void rtw_netif_device_attach(struct net_device *pnetdev)
{
	netif_device_attach(pnetdev);
}
static inline void rtw_netif_device_detach(struct net_device *pnetdev)
{
	netif_device_detach(pnetdev);
}
static inline void rtw_netif_carrier_on(struct net_device *pnetdev)
{
	netif_carrier_on(pnetdev);
}
static inline void rtw_netif_carrier_off(struct net_device *pnetdev)
{
	netif_carrier_off(pnetdev);
}

static inline int rtw_merge_string(char *dst, int dst_len, const char *src1, const char *src2)
{
	int len = 0;
	len += snprintf(dst + len, dst_len - len, "%s", src1);
	len += snprintf(dst + len, dst_len - len, "%s", src2);

	return len;
}

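/*
 * Illustrative sketch: rtw_merge_string() concatenates two strings into dst
 * with snprintf() and returns the combined length; callers are expected to
 * size dst so that both sources fit, e.g. when building a firmware path.
 * The path below is hypothetical:
 *
 *	char path[PATH_LENGTH_MAX];
 *
 *	rtw_merge_string(path, sizeof(path), "/lib/firmware/", "rtw_fw.bin");
 */
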
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */


/* Suspend lock: prevents the system from entering suspend */
#ifdef CONFIG_WAKELOCK
#include <linux/wakelock.h>
#elif defined(CONFIG_ANDROID_POWER)
#include <linux/android_power.h>
#endif

/* limit on path length */
#define PATH_LENGTH_MAX PATH_MAX

/* Atomic integer operations */
#define ATOMIC_T atomic_t


#if defined(DBG_MEM_ERR_FREE)
void rtw_dbg_mem_init(void);
void rtw_dbg_mem_deinit(void);
#else
#define rtw_dbg_mem_init() do {} while (0)
#define rtw_dbg_mem_deinit() do {} while (0)
#endif /* DBG_MEM_ERR_FREE */

#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL)
#define FUNC_NDEV_FMT "%s(%s)"
#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? adapter->pnetdev->name : NULL)

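/*
 * Illustrative sketch: the *_FMT/*_ARG macro pairs are meant for printk-style
 * logging, keeping the format string and its arguments in sync. A plain
 * printk is used here; the driver's own debug macros are typically used
 * the same way (names of ndev/padapter are hypothetical):
 *
 *	printk("enter " FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
 *	printk("adapter " ADPT_FMT " carrier off\n", ADPT_ARG(padapter));
 */
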
struct rtw_netdev_priv_indicator {
	void *priv;
	u32 sizeof_priv;
};
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(name)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(ndev->nd_net, name)
#else
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(dev_net(ndev), name)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(name)
#else
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(&init_net, name)
#endif

#define STRUCT_PACKED __attribute__ ((packed))

#ifndef fallthrough
#define fallthrough do {} while (0)
#endif

#endif /* __OSDEP_LINUX_SERVICE_H_ */