/******************************************************************************
 *
 * Copyright(c) 2007 - 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/namei.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
#include <linux/kref.h>
#endif
/* #include <linux/smp_lock.h> */
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#include <asm/semaphore.h>
#else
#include <linux/semaphore.h>
#endif
#include <linux/sem.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/addrconf.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
#include <linux/interrupt.h>	/* for struct tasklet_struct */
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_RTKM
#include <rtw_mem.h>
#endif /* CONFIG_RTKM */

#if defined(RTW_XMIT_THREAD_HIGH_PRIORITY) || \
    defined(RTW_XMIT_THREAD_CB_HIGH_PRIORITY) || \
    defined(RTW_RECV_THREAD_HIGH_PRIORITY)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
#include <uapi/linux/sched/types.h>	/* struct sched_param */
#endif
#endif

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 5, 41))
#include <linux/tqueue.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <uapi/linux/limits.h>
#else
#include <linux/limits.h>
#endif

#ifdef RTK_DMP_PLATFORM
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
#include <linux/pageremap.h>
#endif
#include <asm/io.h>
#endif

#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif

/* Monitor mode */
#include <net/ieee80211_radiotap.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
#include <linux/ieee80211.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && \
	 LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
#define CONFIG_IEEE80211_HT_ADDT_INFO
#endif

#ifdef CONFIG_IOCTL_CFG80211
	/*	#include <linux/ieee80211.h> */
	#include <net/cfg80211.h>
#else
	#ifdef CONFIG_REGD_SRC_FROM_OS
	#error "CONFIG_REGD_SRC_FROM_OS requires CONFIG_IOCTL_CFG80211"
	#endif
#endif /* CONFIG_IOCTL_CFG80211 */


#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif /* CONFIG_HAS_EARLYSUSPEND */

#ifdef CONFIG_EFUSE_CONFIG_FILE
#include <linux/fs.h>
#endif

#ifdef CONFIG_USB_HCI
#include <linux/usb.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
#include <linux/usb_ch9.h>
#else
#include <linux/usb/ch9.h>
#endif
#endif


#if defined(CONFIG_RTW_GRO) && (!defined(CONFIG_RTW_NAPI))

	#error "Enable NAPI before enabling GRO\n"

#endif


#if (KERNEL_VERSION(2, 6, 29) > LINUX_VERSION_CODE && defined(CONFIG_RTW_NAPI))

	#undef CONFIG_RTW_NAPI
	/*#warning "Linux Kernel version too old to support NAPI (should be newer than 2.6.29)\n"*/

#endif

#if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE && defined(CONFIG_RTW_GRO))

	#undef CONFIG_RTW_GRO
	/*#warning "Linux Kernel version too old to support GRO (should be newer than 2.6.33)\n"*/

#endif

#define ATOMIC_T atomic_t

#ifdef DBG_MEMORY_LEAK
extern ATOMIC_T _malloc_cnt;
extern ATOMIC_T _malloc_size;
#endif

static inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf;

	pbuf = vmalloc(sz);

#ifdef DBG_MEMORY_LEAK
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}

static inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf;

	pbuf = _rtw_vmalloc(sz);
	if (pbuf != NULL)
		memset(pbuf, 0, sz);

	return pbuf;
}

static inline void _rtw_vmfree(void *pbuf, u32 sz)
{
	vfree(pbuf);

#ifdef DBG_MEMORY_LEAK
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif /* DBG_MEMORY_LEAK */
}

static inline void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

	#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		pbuf = dvr_malloc(sz);
	else
	#endif
	{
#ifdef CONFIG_RTKM
		pbuf = rtkm_kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else /* !CONFIG_RTKM */
		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* CONFIG_RTKM */
	}

#ifdef DBG_MEMORY_LEAK
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}

static inline void *_rtw_zmalloc(u32 sz)
{
#if 0
	void *pbuf = _rtw_malloc(sz);

	if (pbuf != NULL)
		memset(pbuf, 0, sz);
#else
#ifdef CONFIG_RTKM
	void *pbuf = rtkm_kzalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else /* !CONFIG_RTKM */
	/* kzalloc() is available since KERNEL_VERSION(2, 6, 14) */
	void *pbuf = kzalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* CONFIG_RTKM */

#endif
	return pbuf;
}

static inline void _rtw_mfree(void *pbuf, u32 sz)
{
	#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		dvr_free(pbuf);
	else
	#endif
	{
#ifdef CONFIG_RTKM
		rtkm_kfree(pbuf, sz);
#else /* !CONFIG_RTKM */
		kfree(pbuf);
#endif /* CONFIG_RTKM */
	}

#ifdef DBG_MEMORY_LEAK
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif /* DBG_MEMORY_LEAK */

}
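
/*
 * Usage sketch (illustrative, not part of the original source): every
 * _rtw_malloc()/_rtw_zmalloc() must be paired with an _rtw_mfree() passing
 * the same size, because the size selects the backing allocator
 * (dvr_malloc vs. kmalloc) and feeds the DBG_MEMORY_LEAK counters.
 * "struct foo" is a hypothetical caller-side type.
 *
 *	struct foo *p = (struct foo *)_rtw_zmalloc(sizeof(*p));
 *
 *	if (p == NULL)
 *		return _FAIL;
 *	...
 *	_rtw_mfree(p, sizeof(*p));
 */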

#ifdef CONFIG_USB_HCI
typedef struct urb *PURB;

static inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
	#else
	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
	#endif
}
static inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	usb_free_coherent(dev, size, addr, dma);
	#else
	usb_buffer_free(dev, size, addr, dma);
	#endif
}
#endif /* CONFIG_USB_HCI */


/*lock - spinlock*/
typedef	spinlock_t _lock;
static inline void _rtw_spinlock_init(_lock *plock)
{
	spin_lock_init(plock);
}
static inline void _rtw_spinlock_free(_lock *plock)
{
}
static inline void _rtw_spinlock(_lock *plock)
{
	spin_lock(plock);
}
static inline void _rtw_spinunlock(_lock *plock)
{
	spin_unlock(plock);
}

#if 0
static inline void _rtw_spinlock_ex(_lock *plock)
{
	spin_lock(plock);
}

static inline void _rtw_spinunlock_ex(_lock *plock)
{
	spin_unlock(plock);
}
#endif
static inline void _rtw_spinlock_irq(_lock *plock, unsigned long *flags)
{
	spin_lock_irqsave(plock, *flags);
}
static inline void _rtw_spinunlock_irq(_lock *plock, unsigned long *flags)
{
	spin_unlock_irqrestore(plock, *flags);
}
static inline void _rtw_spinlock_bh(_lock *plock)
{
	spin_lock_bh(plock);
}
static inline void _rtw_spinunlock_bh(_lock *plock)
{
	spin_unlock_bh(plock);
}
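
/*
 * Usage sketch (illustrative, not part of the original source): unlike the
 * raw kernel API, the _irq variants take the flags word by pointer.
 *
 *	_lock lock;
 *	unsigned long sp_flags;
 *
 *	_rtw_spinlock_init(&lock);
 *	_rtw_spinlock_irq(&lock, &sp_flags);
 *	... critical section with local IRQs disabled ...
 *	_rtw_spinunlock_irq(&lock, &sp_flags);
 */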


/*lock - semaphore*/
typedef struct	semaphore _sema;
static inline void _rtw_init_sema(_sema *sema, int init_val)
{
	sema_init(sema, init_val);
}
static inline void _rtw_free_sema(_sema *sema)
{
}
static inline void _rtw_up_sema(_sema *sema)
{
	up(sema);
}
static inline u32 _rtw_down_sema(_sema *sema)
{
	if (down_interruptible(sema))
		return _FAIL;
	else
		return _SUCCESS;
}
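
/*
 * Usage sketch (illustrative, not part of the original source):
 * _rtw_down_sema() uses down_interruptible(), so it returns _FAIL when a
 * signal interrupts the wait and the caller must handle that case.
 *
 *	_sema sema;
 *
 *	_rtw_init_sema(&sema, 0);
 *	if (_rtw_down_sema(&sema) == _FAIL)
 *		return;		(interrupted by a signal, nothing acquired)
 *	...
 *	_rtw_up_sema(&sema);
 */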

/*lock - mutex*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	typedef struct mutex		_mutex;
#else
	typedef struct semaphore	_mutex;
#endif
static inline void _rtw_mutex_init(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_init(pmutex);
#else
	init_MUTEX(pmutex);
#endif
}

static inline void _rtw_mutex_free(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#else
#endif
}
static inline int _rtw_mutex_lock_interruptible(_mutex *pmutex)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	/* mutex_lock(pmutex); */
	ret = mutex_lock_interruptible(pmutex);
#else
	ret = down_interruptible(pmutex);
#endif
	return ret;
}

static inline int _rtw_mutex_lock(_mutex *pmutex)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_lock(pmutex);
#else
	down(pmutex);
#endif
	return ret;
}

static inline void _rtw_mutex_unlock(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_unlock(pmutex);
#else
	up(pmutex);
#endif
}
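
/*
 * Usage sketch (illustrative, not part of the original source): the
 * interruptible variant propagates the kernel return code (0 on success,
 * negative if interrupted), so check it before touching shared state.
 *
 *	_mutex mtx;
 *
 *	_rtw_mutex_init(&mtx);
 *	if (_rtw_mutex_lock_interruptible(&mtx) == 0) {
 *		...
 *		_rtw_mutex_unlock(&mtx);
 *	}
 *	_rtw_mutex_free(&mtx);
 */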


/*completion*/
typedef struct completion _completion;
static inline void _rtw_init_completion(_completion *comp)
{
	init_completion(comp);
}
static inline unsigned long _rtw_wait_for_comp_timeout(_completion *comp, unsigned long timeout)
{
	return wait_for_completion_timeout(comp, timeout);
}
static inline void _rtw_wait_for_comp(_completion *comp)
{
	wait_for_completion(comp);
}
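
/*
 * Usage sketch (illustrative, not part of the original source):
 * _rtw_wait_for_comp_timeout() returns the kernel's remaining-jiffies value,
 * i.e. 0 on timeout and non-zero if the completion fired.
 *
 *	_completion done;
 *
 *	_rtw_init_completion(&done);
 *	... hand &done to another context that calls complete(&done) ...
 *	if (_rtw_wait_for_comp_timeout(&done, msecs_to_jiffies(100)) == 0)
 *		... timed out ...
 */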

struct	__queue	{
	struct	list_head	queue;
	_lock	lock;
};

typedef unsigned char	_buffer;

typedef struct	__queue	_queue;


/*list*/
#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))


typedef struct	list_head	_list;
/* Caller must check if the list is empty before calling rtw_list_delete */
static inline void rtw_list_delete(_list *plist)
{
	list_del_init(plist);
}

static inline _list *get_next(_list *list)
{
	return list->next;
}
static inline _list *get_list_head(_queue *queue)
{
	return &(queue->queue);
}
#define rtw_list_first_entry(ptr, type, member) list_first_entry(ptr, type, member)
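
/*
 * Usage sketch (illustrative, not part of the original source):
 * LIST_CONTAINOR mirrors the kernel's container_of(), recovering the
 * enclosing object from an embedded _list node. "struct item" is a
 * hypothetical element type.
 *
 *	struct item {
 *		_list node;
 *		int val;
 *	};
 *
 *	_list *pos = get_next(get_list_head(&some_queue));
 *	struct item *it = LIST_CONTAINOR(pos, struct item, node);
 */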

/* hlist */
typedef struct	hlist_head	rtw_hlist_head;
typedef struct	hlist_node	rtw_hlist_node;
#define rtw_hlist_for_each_entry(pos, head, member) hlist_for_each_entry(pos, head, member)
#define rtw_hlist_for_each_safe(pos, n, head) hlist_for_each_safe(pos, n, head)
#define rtw_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, head, member)
#else
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, np, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, node, head, member)
#endif

/* RCU */
typedef struct rcu_head rtw_rcu_head;
#define rtw_rcu_dereference(p) rcu_dereference((p))
#define rtw_rcu_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define rtw_rcu_assign_pointer(p, v) rcu_assign_pointer((p), (v))
#define rtw_rcu_read_lock() rcu_read_lock()
#define rtw_rcu_read_unlock() rcu_read_unlock()
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34))
#define rtw_rcu_access_pointer(p) rcu_access_pointer(p)
#endif

/* rhashtable */
#include "../os_dep/linux/rtw_rhashtable.h"


/*thread*/
typedef void *_thread_hdl_;
typedef int thread_return;
typedef void *thread_context;
struct thread_hdl {
	_thread_hdl_ thread_handler;
	u8 thread_status;
};
#define THREAD_STATUS_STARTED BIT(0)
#define THREAD_STATUS_STOPPED BIT(1)
#define RST_THREAD_STATUS(t)		((t)->thread_status = 0)
#define SET_THREAD_STATUS(t, s)		((t)->thread_status |= (s))
#define CLR_THREAD_STATUS(t, cl)	((t)->thread_status &= ~(cl))
#define CHK_THREAD_STATUS(t, ck)	((t)->thread_status & (ck))

typedef void timer_hdl_return;
typedef void *timer_hdl_context;

static inline void rtw_thread_enter(char *name)
{
	allow_signal(SIGTERM);
}

static inline void rtw_thread_exit(_completion *comp)
{
	complete_and_exit(comp, 0);
}

static inline _thread_hdl_ rtw_thread_start(int (*threadfn)(void *data),
			void *data, const char namefmt[])
{
	_thread_hdl_ _rtw_thread = NULL;

	_rtw_thread = kthread_run(threadfn, data, namefmt);
	if (IS_ERR(_rtw_thread)) {
		WARN_ON(!_rtw_thread);
		_rtw_thread = NULL;
	}
	return _rtw_thread;
}
static inline bool rtw_thread_stop(_thread_hdl_ th)
{
	return kthread_stop(th);
}
static inline void rtw_thread_wait_stop(void)
{
	#if 0
	while (!kthread_should_stop())
		rtw_msleep_os(10);
	#else
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	#endif
}

static inline void flush_signals_thread(void)
{
	if (signal_pending(current))
		flush_signals(current);
}
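
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * thread pairs rtw_thread_start() with rtw_thread_stop() from the owner,
 * while the thread function polls kthread_should_stop() before exiting via
 * rtw_thread_exit(). "my_thread" and "my_comp" are hypothetical names.
 *
 *	static int my_thread(void *ctx)
 *	{
 *		rtw_thread_enter("my_thread");
 *		while (!kthread_should_stop())
 *			... do work ...
 *		rtw_thread_exit(&my_comp);
 *		return 0;
 *	}
 *
 *	_thread_hdl_ th = rtw_thread_start(my_thread, adapter, "rtw_my_thread");
 */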


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#endif

typedef unsigned long systime;

/*tasklet*/
typedef struct tasklet_struct _tasklet;
typedef void (*tasklet_fn_t)(unsigned long);

#if 1
static inline void rtw_tasklet_init(_tasklet *t, tasklet_fn_t func,
							unsigned long data)
{
	tasklet_init(t, func, data);
}
static inline void rtw_tasklet_kill(_tasklet *t)
{
	tasklet_kill(t);
}

static inline void rtw_tasklet_schedule(_tasklet *t)
{
	tasklet_schedule(t);
}
static inline void rtw_tasklet_hi_schedule(_tasklet *t)
{
	tasklet_hi_schedule(t);
}
#else
#define rtw_tasklet_init tasklet_init
#define rtw_tasklet_kill tasklet_kill
#define rtw_tasklet_schedule tasklet_schedule
#define rtw_tasklet_hi_schedule tasklet_hi_schedule
#endif
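
/*
 * Usage sketch (illustrative, not part of the original source): tasklets run
 * in softirq context, so the handler must not sleep; kill the tasklet before
 * freeing anything it touches. "my_tasklet_fn" is a hypothetical handler.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		... non-sleeping work ...
 *	}
 *
 *	_tasklet t;
 *
 *	rtw_tasklet_init(&t, my_tasklet_fn, (unsigned long)adapter);
 *	rtw_tasklet_schedule(&t);
 *	...
 *	rtw_tasklet_kill(&t);
 */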

/*skb_buffer*/
static inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline void _rtw_skb_free(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

static inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline int _rtw_skb_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

static inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
/* Ported from the Linux kernel, for compatibility with old kernels. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif
static inline u8 *rtw_skb_data(struct sk_buff *pkt)
{
	return pkt->data;
}

static inline u32 rtw_skb_len(struct sk_buff *pkt)
{
	return pkt->len;
}

static inline void *rtw_skb_put_zero(struct sk_buff *skb, unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	return skb_put_zero(skb, len);
#else
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
#endif
}
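
/*
 * Usage sketch (illustrative, not part of the original source): the
 * _rtw_skb_* wrappers pick GFP_ATOMIC vs. GFP_KERNEL automatically from
 * in_interrupt(), so they are safe in both process and interrupt context.
 *
 *	struct sk_buff *skb = _rtw_skb_alloc(2048);
 *
 *	if (skb == NULL)
 *		return _FAIL;
 *	rtw_skb_put_zero(skb, 64);	(reserve and zero 64 bytes of payload)
 *	...
 *	_rtw_skb_free(skb);
 */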

/*timer*/
typedef struct rtw_timer_list _timer;
struct rtw_timer_list {
	struct timer_list timer;
	void (*function)(void *);
	void *arg;
};

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
static inline void timer_hdl(struct timer_list *in_timer)
{
	_timer *ptimer = from_timer(ptimer, in_timer, timer);

	ptimer->function(ptimer->arg);
}
#else
static inline void timer_hdl(unsigned long cntx)
{
	_timer *ptimer = (_timer *)cntx;

	ptimer->function(ptimer->arg);
}
#endif

static inline void _init_timer(_timer *ptimer, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	timer_setup(&ptimer->timer, timer_hdl, 0);
#else
	/* setup_timer(ptimer, pfunc, (u32)cntx); */
	ptimer->timer.function = timer_hdl;
	ptimer->timer.data = (unsigned long)ptimer;
	init_timer(&ptimer->timer);
#endif
}

static inline void _set_timer(_timer *ptimer, u32 delay_time)
{
	mod_timer(&ptimer->timer, (jiffies + (delay_time * HZ / 1000)));
}

static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	*bcancelled = del_timer_sync(&ptimer->timer) == 1 ? 1 : 0;
}

static inline void _cancel_timer_async(_timer *ptimer)
{
	del_timer(&ptimer->timer);
}
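
/*
 * Usage sketch (illustrative, not part of the original source): _set_timer()
 * takes the delay in milliseconds and converts it to jiffies internally.
 * "my_timeout" is a hypothetical callback.
 *
 *	static void my_timeout(void *ctx)
 *	{
 *		...
 *	}
 *
 *	_timer tm;
 *	u8 cancelled;
 *
 *	_init_timer(&tm, my_timeout, adapter);
 *	_set_timer(&tm, 100);		(fires my_timeout(adapter) in ~100 ms)
 *	_cancel_timer(&tm, &cancelled);
 */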

/*work*/
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
typedef struct work_struct _workitem;
#else
typedef struct tq_struct _workitem;
#endif

static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
	INIT_WORK(pwork, pfunc);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	INIT_WORK(pwork, pfunc, pwork);
#else
	INIT_TQUEUE(pwork, pfunc, pwork);
#endif
}

static inline void _set_workitem(_workitem *pwork)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	schedule_work(pwork);
#else
	schedule_task(pwork);
#endif
}

static inline void _cancel_workitem_sync(_workitem *pwork)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	cancel_work_sync(pwork);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	flush_scheduled_work();
#else
	flush_scheduled_tasks();
#endif
}
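
/*
 * Usage sketch (illustrative, not part of the original source): on kernels
 * >= 2.6.20 the work handler receives the work_struct pointer itself, so the
 * owner is typically recovered with container_of(). "my_work_fn" and
 * "struct my_ctx" are hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *w)
 *	{
 *		struct my_ctx *ctx = container_of(w, struct my_ctx, work);
 *		...
 *	}
 *
 *	_init_workitem(&ctx->work, my_work_fn, NULL);
 *	_set_workitem(&ctx->work);
 *	...
 *	_cancel_workitem_sync(&ctx->work);
 */
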
/*
 * Global Mutex: can only be used at PASSIVE level.
 */
#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)                              \
	{                                                               \
		while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1) { \
			atomic_dec((atomic_t *)&(_MutexCounter));        \
			msleep(10);                          \
		}                                                           \
	}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)                              \
	{                                                               \
		atomic_dec((atomic_t *)&(_MutexCounter));        \
	}
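
/*
 * Usage sketch (illustrative, not part of the original source): the "global
 * mutex" is a spin-and-sleep flag built on an atomic counter, so it may only
 * be used where msleep() is allowed (process context). "drv_global_mutex"
 * is a hypothetical counter.
 *
 *	static atomic_t drv_global_mutex;
 *
 *	ACQUIRE_GLOBAL_MUTEX(drv_global_mutex);
 *	... exclusive section ...
 *	RELEASE_GLOBAL_MUTEX(drv_global_mutex);
 */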


typedef	struct	net_device *_nic_hdl;
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
#else
	return netif_queue_stopped(pnetdev);
#endif
}

#ifdef CONFIG_HWSIM
int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
#else
static inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(CONFIG_RTW_FC_FASTFWD)
	extern int fwdEngine_wifi_rx(struct sk_buff *skb);
	enum {
		RE8670_RX_STOP = 0,
		RE8670_RX_CONTINUE,
		RE8670_RX_STOP_SKBNOFREE,
		RE8670_RX_END
	};
	int ret = 0;

	skb->dev = ndev;
	/* expose the 14-byte Ethernet header to the forwarding engine */
	skb->data -= 14;
	skb->len += 14;

	ret = fwdEngine_wifi_rx(skb);

	if (ret == RE8670_RX_CONTINUE) {
		/* engine passed the frame back; restore the payload view */
		skb->data += 14;
		skb->len -= 14;
		return netif_rx(skb);
	} else if (ret == RE8670_RX_STOP) {
		kfree_skb(skb);
	}

	return 0;
#else
	skb->dev = ndev;
	return netif_rx(skb);
#endif
}
#endif

#ifdef CONFIG_RTW_NAPI
static inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
	skb->dev = ndev;
	return netif_receive_skb(skb);
}

#ifdef CONFIG_RTW_GRO
static inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	return napi_gro_receive(napi, skb);
}
#endif /* CONFIG_RTW_GRO */
#endif /* CONFIG_RTW_NAPI */

static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_wake_all_queues(pnetdev);
#else
	netif_wake_queue(pnetdev);
#endif
}

static inline void rtw_netif_start_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_start_all_queues(pnetdev);
#else
	netif_start_queue(pnetdev);
#endif
}

static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_stop_all_queues(pnetdev);
#else
	netif_stop_queue(pnetdev);
#endif
}
static inline void rtw_netif_device_attach(struct net_device *pnetdev)
{
	netif_device_attach(pnetdev);
}
static inline void rtw_netif_device_detach(struct net_device *pnetdev)
{
	netif_device_detach(pnetdev);
}
static inline void rtw_netif_carrier_on(struct net_device *pnetdev)
{
	netif_carrier_on(pnetdev);
}
static inline void rtw_netif_carrier_off(struct net_device *pnetdev)
{
	netif_carrier_off(pnetdev);
}

static inline int rtw_merge_string(char *dst, int dst_len, const char *src1, const char *src2)
{
	int len = 0;

	len += snprintf(dst + len, dst_len - len, "%s", src1);
	if (len > dst_len - 1)
		len = dst_len - 1;	/* snprintf returns the untruncated length */
	len += snprintf(dst + len, dst_len - len, "%s", src2);

	return len;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
	#define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */


/* Suspend lock: prevents the system from suspending */
#ifdef CONFIG_WAKELOCK
	#include <linux/wakelock.h>
#elif defined(CONFIG_ANDROID_POWER)
	#include <linux/android_power.h>
#endif

/* maximum path length */
#define PATH_LENGTH_MAX PATH_MAX

/* Atomic integer operations */
static inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
	atomic_set(v, i);
}

static inline int ATOMIC_READ(ATOMIC_T *v)
{
	return atomic_read(v);
}

static inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
	atomic_add(i, v);
}
static inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
	atomic_sub(i, v);
}

static inline void ATOMIC_INC(ATOMIC_T *v)
{
	atomic_inc(v);
}

static inline void ATOMIC_DEC(ATOMIC_T *v)
{
	atomic_dec(v);
}

static inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
	return atomic_add_return(i, v);
}

static inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
	return atomic_sub_return(i, v);
}

static inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
	return atomic_inc_return(v);
}

static inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
	return atomic_dec_return(v);
}

static inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
	return atomic_add_unless(v, 1, u);
#else
	/* only guarantees the value does not exceed u after this function */
	if (ATOMIC_INC_RETURN(v) > u) {
		ATOMIC_DEC(v);
		return 0;
	}
	return 1;
#endif
}
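
/*
 * Usage sketch (illustrative, not part of the original source):
 * ATOMIC_INC_UNLESS() is a bounded reference grab, returning false when the
 * counter has already reached the limit.
 *
 *	ATOMIC_T refcnt;
 *
 *	ATOMIC_SET(&refcnt, 0);
 *	if (!ATOMIC_INC_UNLESS(&refcnt, 8))
 *		return _FAIL;	(already at the 8-reference limit)
 *	...
 *	ATOMIC_DEC(&refcnt);
 */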

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL)
#define FUNC_NDEV_FMT "%s(%s)"
#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? adapter->pnetdev->name : NULL)

#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
struct rtw_netdev_priv_indicator {
	void *priv;
	u32 sizeof_priv;
};
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(name)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(ndev->nd_net, name)
#else
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(dev_net(ndev), name)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(name)
#else
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(&init_net, name)
#endif

static inline void rtw_dump_stack(void)
{
	dump_stack();
}
#define rtw_bug_on(condition) BUG_ON(condition)
#define rtw_warn_on(condition) WARN_ON(condition)
#define RTW_DIV_ROUND_UP(n, d)	DIV_ROUND_UP(n, d)
#define rtw_sprintf(buf, size, format, arg...) snprintf(buf, size, format, ##arg)

#define STRUCT_PACKED __attribute__ ((packed))

#ifndef fallthrough
#if defined(__has_attribute) && __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

#endif /* __OSDEP_LINUX_SERVICE_H_ */