/******************************************************************************
 *
 * Copyright(c) 2007 - 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/namei.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
#include <linux/kref.h>
#endif
/* #include <linux/smp_lock.h> */
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#include <asm/semaphore.h>
#else
#include <linux/semaphore.h>
#endif
#include <linux/sem.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/addrconf.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
#include <linux/interrupt.h> /* for struct tasklet_struct */
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_RTKM
#include <rtw_mem.h>
#endif /* CONFIG_RTKM */

#ifdef CONFIG_RECV_THREAD_MODE
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
#include <uapi/linux/sched/types.h> /* struct sched_param */
#endif
#endif

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 5, 41))
#include <linux/tqueue.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <uapi/linux/limits.h>
#else
#include <linux/limits.h>
#endif

#ifdef RTK_DMP_PLATFORM
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
#include <linux/pageremap.h>
#endif
#include <asm/io.h>
#endif

#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif

/* Monitor mode */
#include <net/ieee80211_radiotap.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
#include <linux/ieee80211.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
#define CONFIG_IEEE80211_HT_ADDT_INFO
#endif

#ifdef CONFIG_IOCTL_CFG80211
/* #include <linux/ieee80211.h> */
#include <net/cfg80211.h>
#else
#ifdef CONFIG_REGD_SRC_FROM_OS
#error "CONFIG_REGD_SRC_FROM_OS requires CONFIG_IOCTL_CFG80211"
#endif
#endif /* CONFIG_IOCTL_CFG80211 */


#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif /* CONFIG_HAS_EARLYSUSPEND */

#ifdef CONFIG_EFUSE_CONFIG_FILE
#include <linux/fs.h>
#endif

#ifdef CONFIG_USB_HCI
#include <linux/usb.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
#include <linux/usb_ch9.h>
#else
#include <linux/usb/ch9.h>
#endif
#endif


#if defined(CONFIG_RTW_GRO) && (!defined(CONFIG_RTW_NAPI))

#error "Enable NAPI before enabling GRO\n"

#endif


#if (KERNEL_VERSION(2, 6, 29) > LINUX_VERSION_CODE && defined(CONFIG_RTW_NAPI))

#undef CONFIG_RTW_NAPI
/* #warning "Linux kernel version too old to support NAPI (should be newer than 2.6.29)\n" */

#endif

#if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE && defined(CONFIG_RTW_GRO))

#undef CONFIG_RTW_GRO
/* #warning "Linux kernel version too old to support GRO (should be newer than 2.6.33)\n" */

#endif

#define ATOMIC_T atomic_t

#ifdef DBG_MEMORY_LEAK
extern ATOMIC_T _malloc_cnt;
extern ATOMIC_T _malloc_size;
#endif

static inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf;

	pbuf = vmalloc(sz);

#ifdef DBG_MEMORY_LEAK
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}

static inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf;

	pbuf = _rtw_vmalloc(sz);
	if (pbuf != NULL)
		memset(pbuf, 0, sz);

	return pbuf;
}

static inline void _rtw_vmfree(void *pbuf, u32 sz)
{
	vfree(pbuf);

#ifdef DBG_MEMORY_LEAK
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif /* DBG_MEMORY_LEAK */
}

static inline void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		pbuf = dvr_malloc(sz);
	else
#endif
	{
#ifdef CONFIG_RTKM
		pbuf = rtkm_kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else /* !CONFIG_RTKM */
		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* CONFIG_RTKM */
	}

#ifdef DBG_MEMORY_LEAK
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}

static inline void *_rtw_zmalloc(u32 sz)
{
#if 0
	void *pbuf = _rtw_malloc(sz);

	if (pbuf != NULL)
		memset(pbuf, 0, sz);
#else
#ifdef CONFIG_RTKM
	void *pbuf = rtkm_kzalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else /* !CONFIG_RTKM */
	/* kzalloc is available since KERNEL_VERSION(2, 6, 14) */
	void *pbuf = kzalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* CONFIG_RTKM */

#endif
	return pbuf;
}

static inline void _rtw_mfree(void *pbuf, u32 sz)
{
#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		dvr_free(pbuf);
	else
#endif
	{
#ifdef CONFIG_RTKM
		rtkm_kfree(pbuf, sz);
#else /* !CONFIG_RTKM */
		kfree(pbuf);
#endif /* CONFIG_RTKM */
	}

#ifdef DBG_MEMORY_LEAK
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif /* DBG_MEMORY_LEAK */
}
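/*
 * Usage sketch (illustrative; struct my_ctx is hypothetical). Buffers from
 * _rtw_malloc()/_rtw_zmalloc() must be released with _rtw_mfree() using the
 * same size, so the DBG_MEMORY_LEAK counters and the RTK_DMP_PLATFORM
 * size-based pool selection stay consistent; the vmalloc variants pair the
 * same way with _rtw_vmfree():
 *
 *	struct my_ctx *ctx = _rtw_zmalloc(sizeof(*ctx));
 *
 *	if (ctx != NULL) {
 *		... use ctx ...
 *		_rtw_mfree(ctx, sizeof(*ctx));
 *	}
 */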

#ifdef CONFIG_USB_HCI
typedef struct urb *PURB;

static inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#else
	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#endif
}
static inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	usb_free_coherent(dev, size, addr, dma);
#else
	usb_buffer_free(dev, size, addr, dma);
#endif
}
#endif /* CONFIG_USB_HCI */


/* lock - spinlock */
typedef spinlock_t _lock;
static inline void _rtw_spinlock_init(_lock *plock)
{
	spin_lock_init(plock);
}
static inline void _rtw_spinlock_free(_lock *plock)
{
}
static inline void _rtw_spinlock(_lock *plock)
{
	spin_lock(plock);
}
static inline void _rtw_spinunlock(_lock *plock)
{
	spin_unlock(plock);
}

#if 0
static inline void _rtw_spinlock_ex(_lock *plock)
{
	spin_lock(plock);
}

static inline void _rtw_spinunlock_ex(_lock *plock)
{
	spin_unlock(plock);
}
#endif
static inline void _rtw_spinlock_irq(_lock *plock, unsigned long *flags)
{
	spin_lock_irqsave(plock, *flags);
}
static inline void _rtw_spinunlock_irq(_lock *plock, unsigned long *flags)
{
	spin_unlock_irqrestore(plock, *flags);
}
static inline void _rtw_spinlock_bh(_lock *plock)
{
	spin_lock_bh(plock);
}
static inline void _rtw_spinunlock_bh(_lock *plock)
{
	spin_unlock_bh(plock);
}
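/*
 * Usage sketch (illustrative; padapter->lock is a hypothetical _lock
 * field). The _irq variants take a caller-provided flags word by pointer,
 * mirroring spin_lock_irqsave()/spin_unlock_irqrestore():
 *
 *	unsigned long sp_flags;
 *
 *	_rtw_spinlock_irq(&padapter->lock, &sp_flags);
 *	... critical section with local interrupts disabled ...
 *	_rtw_spinunlock_irq(&padapter->lock, &sp_flags);
 */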


/* lock - semaphore */
typedef struct semaphore _sema;
static inline void _rtw_init_sema(_sema *sema, int init_val)
{
	sema_init(sema, init_val);
}
static inline void _rtw_free_sema(_sema *sema)
{
}
static inline void _rtw_up_sema(_sema *sema)
{
	up(sema);
}
static inline u32 _rtw_down_sema(_sema *sema)
{
	if (down_interruptible(sema))
		return _FAIL;
	else
		return _SUCCESS;
}

/* lock - mutex */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
typedef struct mutex _mutex;
#else
typedef struct semaphore _mutex;
#endif
static inline void _rtw_mutex_init(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_init(pmutex);
#else
	init_MUTEX(pmutex);
#endif
}

static inline void _rtw_mutex_free(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#else
#endif
}
static inline int _rtw_mutex_lock_interruptible(_mutex *pmutex)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	/* mutex_lock(pmutex); */
	ret = mutex_lock_interruptible(pmutex);
#else
	ret = down_interruptible(pmutex);
#endif
	return ret;
}

static inline int _rtw_mutex_lock(_mutex *pmutex)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_lock(pmutex);
#else
	down(pmutex);
#endif
	return ret;
}

static inline void _rtw_mutex_unlock(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_unlock(pmutex);
#else
	up(pmutex);
#endif
}


/* completion */
typedef struct completion _completion;
static inline void _rtw_init_completion(_completion *comp)
{
	init_completion(comp);
}
static inline unsigned long _rtw_wait_for_comp_timeout(_completion *comp, unsigned long timeout)
{
	return wait_for_completion_timeout(comp, timeout);
}
static inline void _rtw_wait_for_comp(_completion *comp)
{
	wait_for_completion(comp);
}

struct __queue {
	struct list_head queue;
	_lock lock;
};

typedef unsigned char _buffer;

typedef struct __queue _queue;


/* list */
#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr) - (SIZE_T)(&((type *)0)->member)))


typedef struct list_head _list;
/* Caller must check if the list is empty before calling rtw_list_delete */
static inline void rtw_list_delete(_list *plist)
{
	list_del_init(plist);
}

static inline _list *get_next(_list *list)
{
	return list->next;
}
static inline _list *get_list_head(_queue *queue)
{
	return &(queue->queue);
}
#define rtw_list_first_entry(ptr, type, member) list_first_entry(ptr, type, member)
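/*
 * LIST_CONTAINOR() recovers the enclosing structure from an embedded _list
 * member, in the spirit of the kernel's container_of(). A minimal usage
 * sketch, assuming a hypothetical entry type and an initialized _queue
 * named my_queue:
 *
 *	struct my_entry {
 *		_list list;
 *		int data;
 *	};
 *
 *	_list *pos = get_next(get_list_head(&my_queue));
 *	struct my_entry *e = LIST_CONTAINOR(pos, struct my_entry, list);
 */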

/* hlist */
typedef struct hlist_head rtw_hlist_head;
typedef struct hlist_node rtw_hlist_node;
#define rtw_hlist_for_each_entry(pos, head, member) hlist_for_each_entry(pos, head, member)
#define rtw_hlist_for_each_safe(pos, n, head) hlist_for_each_safe(pos, n, head)
#define rtw_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, head, member)
#else
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, np, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, node, head, member)
#endif

/* RCU */
typedef struct rcu_head rtw_rcu_head;
#define rtw_rcu_dereference(p) rcu_dereference((p))
#define rtw_rcu_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define rtw_rcu_assign_pointer(p, v) rcu_assign_pointer((p), (v))
#define rtw_rcu_read_lock() rcu_read_lock()
#define rtw_rcu_read_unlock() rcu_read_unlock()
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34))
#define rtw_rcu_access_pointer(p) rcu_access_pointer(p)
#endif

/* rhashtable */
#include "../os_dep/linux/rtw_rhashtable.h"


/* thread */
typedef void *_thread_hdl_;
typedef int thread_return;
typedef void *thread_context;
struct thread_hdl {
	_thread_hdl_ thread_handler;
	u8 thread_status;
};
#define THREAD_STATUS_STARTED BIT(0)
#define THREAD_STATUS_STOPPED BIT(1)
#define RST_THREAD_STATUS(t) (t->thread_status = 0)
#define SET_THREAD_STATUS(t, s) (t->thread_status |= s)
#define CLR_THREAD_STATUS(t, cl) (t->thread_status &= ~(cl))
#define CHK_THREAD_STATUS(t, ck) (t->thread_status & ck)

typedef void timer_hdl_return;
typedef void *timer_hdl_context;

static inline void rtw_thread_enter(char *name)
{
	allow_signal(SIGTERM);
}

static inline void rtw_thread_exit(_completion *comp)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
	complete_and_exit(comp, 0);
#else
	kthread_complete_and_exit(comp, 0);
#endif
}

static inline _thread_hdl_ rtw_thread_start(int (*threadfn)(void *data),
					    void *data, const char namefmt[])
{
	_thread_hdl_ _rtw_thread = NULL;

	_rtw_thread = kthread_run(threadfn, data, namefmt);
	if (IS_ERR(_rtw_thread)) {
		WARN_ON(1); /* thread creation failed */
		_rtw_thread = NULL;
	}
	return _rtw_thread;
}
static inline bool rtw_thread_stop(_thread_hdl_ th)
{
	return kthread_stop(th);
}
static inline void rtw_thread_wait_stop(void)
{
#if 0
	while (!kthread_should_stop())
		rtw_msleep_os(10);
#else
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
#endif
}

static inline void flush_signals_thread(void)
{
	if (signal_pending(current))
		flush_signals(current);
}
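/*
 * Thread lifecycle sketch (illustrative; my_thread_fn, my_ctx and do_work
 * are hypothetical). rtw_thread_start() wraps kthread_run() and returns
 * NULL on failure; the thread function should poll kthread_should_stop()
 * so that rtw_thread_stop() (kthread_stop()) can terminate it:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		struct my_ctx *ctx = (struct my_ctx *)data;
 *
 *		rtw_thread_enter("my_thread");
 *		while (!kthread_should_stop())
 *			do_work(ctx);
 *		return 0;
 *	}
 *
 *	_thread_hdl_ th = rtw_thread_start(my_thread_fn, ctx, "my_thread");
 *	if (th != NULL)
 *		rtw_thread_stop(th);
 */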


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#endif

typedef unsigned long systime;

/* tasklet */
typedef struct tasklet_struct _tasklet;
typedef void (*tasklet_fn_t)(unsigned long);

#if 1
static inline void rtw_tasklet_init(_tasklet *t, tasklet_fn_t func,
				    unsigned long data)
{
	tasklet_init(t, func, data);
}
static inline void rtw_tasklet_kill(_tasklet *t)
{
	tasklet_kill(t);
}

static inline void rtw_tasklet_schedule(_tasklet *t)
{
	tasklet_schedule(t);
}
static inline void rtw_tasklet_hi_schedule(_tasklet *t)
{
	tasklet_hi_schedule(t);
}
#else
#define rtw_tasklet_init tasklet_init
#define rtw_tasklet_kill tasklet_kill
#define rtw_tasklet_schedule tasklet_schedule
#define rtw_tasklet_hi_schedule tasklet_hi_schedule
#endif

/* skb_buffer */
static inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline void _rtw_skb_free(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

static inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline int _rtw_skb_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

static inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
/* Ported from the Linux kernel, for compatibility with old kernels. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif
static inline u8 *rtw_skb_data(struct sk_buff *pkt)
{
	return pkt->data;
}

static inline u32 rtw_skb_len(struct sk_buff *pkt)
{
	return pkt->len;
}

static inline void *rtw_skb_put_zero(struct sk_buff *skb, unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	return skb_put_zero(skb, len);
#else
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
#endif
}

/* timer */
typedef struct rtw_timer_list _timer;
struct rtw_timer_list {
	struct timer_list timer;
	void (*function)(void *);
	void *arg;
};

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
static inline void timer_hdl(struct timer_list *in_timer)
{
	_timer *ptimer = from_timer(ptimer, in_timer, timer);

	ptimer->function(ptimer->arg);
}
#else
static inline void timer_hdl(unsigned long cntx)
{
	_timer *ptimer = (_timer *)cntx;

	ptimer->function(ptimer->arg);
}
#endif

static inline void _init_timer(_timer *ptimer, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	timer_setup(&ptimer->timer, timer_hdl, 0);
#else
	/* setup_timer(ptimer, pfunc, (u32)cntx); */
	ptimer->timer.function = timer_hdl;
	ptimer->timer.data = (unsigned long)ptimer;
	init_timer(&ptimer->timer);
#endif
}

static inline void _set_timer(_timer *ptimer, u32 delay_time)
{
	mod_timer(&ptimer->timer, (jiffies + (delay_time * HZ / 1000)));
}

static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	*bcancelled = del_timer_sync(&ptimer->timer) == 1 ? 1 : 0;
}

static inline void _cancel_timer_async(_timer *ptimer)
{
	del_timer(&ptimer->timer);
}
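/*
 * Timer usage sketch (illustrative; my_timeout_hdl and my_obj are
 * hypothetical). struct rtw_timer_list hides the kernel API split at 4.14
 * (timer_setup()/from_timer() vs. init_timer() with a data word) behind a
 * single void-pointer-context callback; _set_timer() delays are given in
 * milliseconds:
 *
 *	static void my_timeout_hdl(void *cntx)
 *	{
 *		struct my_obj *obj = (struct my_obj *)cntx;
 *		... handle timeout ...
 *	}
 *
 *	_init_timer(&obj->mytimer, my_timeout_hdl, obj);
 *	_set_timer(&obj->mytimer, 100);
 *	_cancel_timer_async(&obj->mytimer);
 */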

/* work */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
typedef struct work_struct _workitem;
#else
typedef struct tq_struct _workitem;
#endif

static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
	INIT_WORK(pwork, pfunc);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	INIT_WORK(pwork, pfunc, pwork);
#else
	INIT_TQUEUE(pwork, pfunc, pwork);
#endif
}

static inline void _set_workitem(_workitem *pwork)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	schedule_work(pwork);
#else
	schedule_task(pwork);
#endif
}

static inline void _cancel_workitem_sync(_workitem *pwork)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	cancel_work_sync(pwork);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	flush_scheduled_work();
#else
	flush_scheduled_tasks();
#endif
}
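/*
 * Work item sketch (illustrative; my_work_fn and my_obj are hypothetical).
 * On kernels >= 2.6.20 INIT_WORK() passes the _workitem pointer itself to
 * the handler, so the context is typically recovered with a container_of()
 * style lookup such as LIST_CONTAINOR():
 *
 *	static void my_work_fn(_workitem *work)
 *	{
 *		struct my_obj *obj = LIST_CONTAINOR(work, struct my_obj, wk);
 *		... handle work ...
 *	}
 *
 *	_init_workitem(&obj->wk, my_work_fn, NULL);
 *	_set_workitem(&obj->wk);
 *	_cancel_workitem_sync(&obj->wk);
 */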

#ifdef CONFIG_PHL_CPU_BALANCE
typedef struct rtw_work_struct _workitem_cpu;
struct rtw_work_struct {
	/* _workitem must be the first member */
	_workitem wk;

	char work_name[32];
	struct workqueue_struct *pwkq;
	u8 cpu_id;
};

static inline void _config_workitem_cpu(_workitem_cpu *pwork, char *name, u8 cpu_id)
{
	pwork->cpu_id = cpu_id;
	strcpy(pwork->work_name, name);
}

static inline void _init_workitem_cpu(_workitem_cpu *pwork, void *pfunc, void *cntx)
{
	INIT_WORK(&pwork->wk, pfunc);
	pwork->pwkq = alloc_workqueue(pwork->work_name, WQ_HIGHPRI, 0);
}

static inline void _set_workitem_cpu(_workitem_cpu *pwork)
{
	queue_work_on(pwork->cpu_id, pwork->pwkq, &pwork->wk);
}

static inline void _cancel_workitem_sync_cpu(_workitem_cpu *pwork)
{
	cancel_work_sync(&pwork->wk);
}
#endif /* CONFIG_PHL_CPU_BALANCE */

/*
 * Global Mutex: can only be used at PASSIVE level.
 */
#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
{ \
	while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1) { \
		atomic_dec((atomic_t *)&(_MutexCounter)); \
		msleep(10); \
	} \
}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
{ \
	atomic_dec((atomic_t *)&(_MutexCounter)); \
}
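/*
 * Usage sketch (illustrative; my_mutex_cnt is a hypothetical shared
 * counter, zero-initialized). ACQUIRE_GLOBAL_MUTEX() spins on the atomic
 * counter and sleeps via msleep(), so it must only be used in process
 * context ("PASSIVE level"), never from an ISR or tasklet:
 *
 *	static int my_mutex_cnt;
 *
 *	ACQUIRE_GLOBAL_MUTEX(my_mutex_cnt);
 *	... serialized section ...
 *	RELEASE_GLOBAL_MUTEX(my_mutex_cnt);
 */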


typedef struct net_device *_nic_hdl;
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
#else
	return netif_queue_stopped(pnetdev);
#endif
}

#ifdef CONFIG_HWSIM
int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
#else
static inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(CONFIG_RTW_FC_FASTFWD)
	extern int fwdEngine_wifi_rx(struct sk_buff *skb);
	enum {
		RE8670_RX_STOP = 0,
		RE8670_RX_CONTINUE,
		RE8670_RX_STOP_SKBNOFREE,
		RE8670_RX_END
	};
	int ret = 0;

	skb->dev = ndev;
	skb->data -= 14;
	skb->len += 14;

	ret = fwdEngine_wifi_rx(skb);

	if (ret == RE8670_RX_CONTINUE) {
		skb->data += 14;
		skb->len -= 14;
		return netif_rx(skb);
	} else if (ret == RE8670_RX_STOP) {
		kfree_skb(skb);
	}

	return 0;
#else
	skb->dev = ndev;
	return netif_rx(skb);
#endif
}
#endif

#ifdef CONFIG_RTW_NAPI
static inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
	skb->dev = ndev;
	return netif_receive_skb(skb);
}

#ifdef CONFIG_RTW_GRO
static inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	return napi_gro_receive(napi, skb);
}
#endif /* CONFIG_RTW_GRO */
#endif /* CONFIG_RTW_NAPI */

static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_wake_all_queues(pnetdev);
#else
	netif_wake_queue(pnetdev);
#endif
}

static inline void rtw_netif_start_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_start_all_queues(pnetdev);
#else
	netif_start_queue(pnetdev);
#endif
}

static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_stop_all_queues(pnetdev);
#else
	netif_stop_queue(pnetdev);
#endif
}
static inline void rtw_netif_device_attach(struct net_device *pnetdev)
{
	netif_device_attach(pnetdev);
}
static inline void rtw_netif_device_detach(struct net_device *pnetdev)
{
	netif_device_detach(pnetdev);
}
static inline void rtw_netif_carrier_on(struct net_device *pnetdev)
{
	netif_carrier_on(pnetdev);
}
static inline void rtw_netif_carrier_off(struct net_device *pnetdev)
{
	netif_carrier_off(pnetdev);
}

static inline int rtw_merge_string(char *dst, int dst_len, const char *src1, const char *src2)
{
	int len = 0;

	len += snprintf(dst + len, dst_len - len, "%s", src1);
	len += snprintf(dst + len, dst_len - len, "%s", src2);

	return len;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */


/* Suspend lock: prevents the system from going to suspend */
#ifdef CONFIG_WAKELOCK
#include <linux/wakelock.h>
#elif defined(CONFIG_ANDROID_POWER)
#include <linux/android_power.h>
#endif

/* limitation of path length */
#define PATH_LENGTH_MAX PATH_MAX

/* Atomic integer operations */
static inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
	atomic_set(v, i);
}

static inline int ATOMIC_READ(ATOMIC_T *v)
{
	return atomic_read(v);
}

static inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
	atomic_add(i, v);
}
static inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
	atomic_sub(i, v);
}

static inline void ATOMIC_INC(ATOMIC_T *v)
{
	atomic_inc(v);
}

static inline void ATOMIC_DEC(ATOMIC_T *v)
{
	atomic_dec(v);
}

static inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
	return atomic_add_return(i, v);
}

static inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
	return atomic_sub_return(i, v);
}

static inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
	return atomic_inc_return(v);
}

static inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
	return atomic_dec_return(v);
}

static inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
	return atomic_add_unless(v, 1, u);
#else
	/* only makes sure the value does not exceed u after this function */
	if (ATOMIC_INC_RETURN(v) > u) {
		ATOMIC_DEC(v);
		return 0;
	}
	return 1;
#endif
}

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL)
#define FUNC_NDEV_FMT "%s(%s)"
#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? adapter->pnetdev->name : NULL)

#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
struct rtw_netdev_priv_indicator {
	void *priv;
	u32 sizeof_priv;
};
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(name)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(ndev->nd_net, name)
#else
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(dev_net(ndev), name)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(name)
#else
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(&init_net, name)
#endif

static inline void rtw_dump_stack(void)
{
	dump_stack();
}
#define rtw_bug_on(condition) BUG_ON(condition)
#define rtw_warn_on(condition) WARN_ON(condition)
#define RTW_DIV_ROUND_UP(n, d) DIV_ROUND_UP(n, d)
#define rtw_sprintf(buf, size, format, arg...) snprintf(buf, size, format, ##arg)

#define STRUCT_PACKED __attribute__ ((packed))

#ifndef fallthrough
#if __GNUC__ >= 5 || defined(__clang__)
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#endif
#endif
#ifndef fallthrough
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
#define rtw_dev_addr_mod(dev, offset, addr, len) _rtw_memcpy(&dev->dev_addr[offset], addr, len)
#else
#define rtw_dev_addr_mod dev_addr_mod
#endif

#endif /* __OSDEP_LINUX_SERVICE_H_ */