/******************************************************************************
 *
 * Copyright(c) 2007 - 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/namei.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
#include <linux/kref.h>
#endif
/* #include <linux/smp_lock.h> */
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#include <asm/semaphore.h>
#else
#include <linux/semaphore.h>
#endif
#include <linux/sem.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/addrconf.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
#include <linux/interrupt.h>	/* for struct tasklet_struct */
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_RTKM
#include <rtw_mem.h>
#endif /* CONFIG_RTKM */

#ifdef CONFIG_RECV_THREAD_MODE
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
#include <uapi/linux/sched/types.h>	/* struct sched_param */
#endif
#endif

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 5, 41))
#include <linux/tqueue.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <uapi/linux/limits.h>
#else
#include <linux/limits.h>
#endif

#ifdef RTK_DMP_PLATFORM
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
#include <linux/pageremap.h>
#endif
#include <asm/io.h>
#endif

#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif

/* Monitor mode */
#include <net/ieee80211_radiotap.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
#include <linux/ieee80211.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && \
	 LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
#define CONFIG_IEEE80211_HT_ADDT_INFO
#endif

#ifdef CONFIG_IOCTL_CFG80211
	/*	#include <linux/ieee80211.h> */
	#include <net/cfg80211.h>
#else
	#ifdef CONFIG_REGD_SRC_FROM_OS
	#error "CONFIG_REGD_SRC_FROM_OS requires CONFIG_IOCTL_CFG80211"
	#endif
#endif /* CONFIG_IOCTL_CFG80211 */


#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif /* CONFIG_HAS_EARLYSUSPEND */

#ifdef CONFIG_EFUSE_CONFIG_FILE
#include <linux/fs.h>
#endif

#ifdef CONFIG_USB_HCI
#include <linux/usb.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
#include <linux/usb_ch9.h>
#else
#include <linux/usb/ch9.h>
#endif
#endif

#if defined(CONFIG_RTW_GRO) && (!defined(CONFIG_RTW_NAPI))

	#error "Enable NAPI before enabling GRO"

#endif


#if (KERNEL_VERSION(2, 6, 29) > LINUX_VERSION_CODE && defined(CONFIG_RTW_NAPI))

	#undef CONFIG_RTW_NAPI
	/* #warning "Linux kernel too old to support NAPI (needs 2.6.29 or newer)" */

#endif

#if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE && defined(CONFIG_RTW_GRO))

	#undef CONFIG_RTW_GRO
	/* #warning "Linux kernel too old to support GRO (needs 2.6.33 or newer)" */

#endif

#define ATOMIC_T atomic_t

#ifdef DBG_MEMORY_LEAK
extern ATOMIC_T _malloc_cnt;
extern ATOMIC_T _malloc_size;
#endif

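/*
 * Heap allocation wrappers. GFP context is chosen via in_interrupt(),
 * which does not detect every atomic context (for example a section that
 * merely holds a spinlock), so callers in such contexts must not rely on
 * these wrappers picking GFP_ATOMIC for them.
 */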
static inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf;

	pbuf = vmalloc(sz);

#ifdef DBG_MEMORY_LEAK
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}

static inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf;

	pbuf = _rtw_vmalloc(sz);
	if (pbuf != NULL)
		memset(pbuf, 0, sz);

	return pbuf;
}

static inline void _rtw_vmfree(void *pbuf, u32 sz)
{
	vfree(pbuf);

#ifdef DBG_MEMORY_LEAK
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif /* DBG_MEMORY_LEAK */
}

static inline void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

	#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		pbuf = dvr_malloc(sz);
	else
	#endif
	{
#ifdef CONFIG_RTKM
		pbuf = rtkm_kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else /* !CONFIG_RTKM */
		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* CONFIG_RTKM */
	}

#ifdef DBG_MEMORY_LEAK
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}

static inline void *_rtw_zmalloc(u32 sz)
{
#if 0
	void *pbuf = _rtw_malloc(sz);

	if (pbuf != NULL)
		memset(pbuf, 0, sz);
#else
#ifdef CONFIG_RTKM
	void *pbuf = rtkm_kzalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else /* !CONFIG_RTKM */
	/* kzalloc is available since kernel 2.6.14 */
	void *pbuf = kzalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* CONFIG_RTKM */

#endif
	return pbuf;
}

static inline void _rtw_mfree(void *pbuf, u32 sz)
{
	#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		dvr_free(pbuf);
	else
	#endif
	{
#ifdef CONFIG_RTKM
		rtkm_kfree(pbuf, sz);
#else /* !CONFIG_RTKM */
		kfree(pbuf);
#endif /* CONFIG_RTKM */
	}

#ifdef DBG_MEMORY_LEAK
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif /* DBG_MEMORY_LEAK */
}

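/*
 * USB coherent-buffer helpers: usb_buffer_alloc()/usb_buffer_free() were
 * renamed usb_alloc_coherent()/usb_free_coherent() in kernel 2.6.35; the
 * wrappers below paper over the rename.
 */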
#ifdef CONFIG_USB_HCI
typedef struct urb *PURB;

static inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
	#else
	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
	#endif
}
static inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	usb_free_coherent(dev, size, addr, dma);
	#else
	usb_buffer_free(dev, size, addr, dma);
	#endif
}
#endif /* CONFIG_USB_HCI */


/*lock - spinlock*/
typedef	spinlock_t _lock;
static inline void _rtw_spinlock_init(_lock *plock)
{
	spin_lock_init(plock);
}
static inline void _rtw_spinlock_free(_lock *plock)
{
}
static inline void _rtw_spinlock(_lock *plock)
{
	spin_lock(plock);
}
static inline void _rtw_spinunlock(_lock *plock)
{
	spin_unlock(plock);
}

#if 0
static inline void _rtw_spinlock_ex(_lock *plock)
{
	spin_lock(plock);
}

static inline void _rtw_spinunlock_ex(_lock *plock)
{
	spin_unlock(plock);
}
#endif
__inline static void _rtw_spinlock_irq(_lock *plock, unsigned long *flags)
{
	spin_lock_irqsave(plock, *flags);
}
__inline static void _rtw_spinunlock_irq(_lock *plock, unsigned long *flags)
{
	spin_unlock_irqrestore(plock, *flags);
}
__inline static void _rtw_spinlock_bh(_lock *plock)
{
	spin_lock_bh(plock);
}
__inline static void _rtw_spinunlock_bh(_lock *plock)
{
	spin_unlock_bh(plock);
}
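
/*
 * Illustrative usage of the spinlock wrappers above (the queue pointer is
 * hypothetical). Data shared with a hard-IRQ path needs the _irq variants;
 * data shared only with softirq context can use the cheaper _bh variants:
 *
 *	unsigned long flags;
 *
 *	_rtw_spinlock_irq(&pqueue->lock, &flags);
 *	... critical section ...
 *	_rtw_spinunlock_irq(&pqueue->lock, &flags);
 */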


/*lock - semaphore*/
typedef struct	semaphore _sema;
static inline void _rtw_init_sema(_sema *sema, int init_val)
{
	sema_init(sema, init_val);
}
static inline void _rtw_free_sema(_sema *sema)
{
}
static inline void _rtw_up_sema(_sema *sema)
{
	up(sema);
}
static inline u32 _rtw_down_sema(_sema *sema)
{
	if (down_interruptible(sema))
		return _FAIL;
	else
		return _SUCCESS;
}

/*lock - mutex*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	typedef struct mutex		_mutex;
#else
	typedef struct semaphore	_mutex;
#endif
static inline void _rtw_mutex_init(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_init(pmutex);
#else
	init_MUTEX(pmutex);
#endif
}

static inline void _rtw_mutex_free(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#else
#endif
}
__inline static int _rtw_mutex_lock_interruptible(_mutex *pmutex)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	/* mutex_lock(pmutex); */
	ret = mutex_lock_interruptible(pmutex);
#else
	ret = down_interruptible(pmutex);
#endif
	return ret;
}

__inline static int _rtw_mutex_lock(_mutex *pmutex)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_lock(pmutex);
#else
	down(pmutex);
#endif
	return ret;
}

__inline static void _rtw_mutex_unlock(_mutex *pmutex)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_unlock(pmutex);
#else
	up(pmutex);
#endif
}
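
/*
 * Note: _rtw_mutex_lock() above always returns 0 (mutex_lock() cannot fail,
 * and down() returns void); the int return type exists only for symmetry
 * with _rtw_mutex_lock_interruptible(), which can return -EINTR when a
 * signal interrupts the wait.
 */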


/*completion*/
typedef struct completion _completion;
static inline void _rtw_init_completion(_completion *comp)
{
	init_completion(comp);
}
static inline unsigned long _rtw_wait_for_comp_timeout(_completion *comp, unsigned long timeout)
{
	return wait_for_completion_timeout(comp, timeout);
}
static inline void _rtw_wait_for_comp(_completion *comp)
{
	wait_for_completion(comp);
}

struct	__queue	{
	struct	list_head	queue;
	_lock	lock;
};

typedef unsigned char	_buffer;

typedef struct	__queue	_queue;


/*list*/
#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
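/*
 * LIST_CONTAINOR is the driver's portable equivalent of the kernel's
 * container_of(): given a pointer to a member embedded in a structure, it
 * recovers the enclosing structure. Illustrative use (the struct and field
 * names here are hypothetical):
 *
 *	struct xmit_frame *pframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
 */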


typedef struct	list_head	_list;
/* Caller must check if the list is empty before calling rtw_list_delete */
__inline static void rtw_list_delete(_list *plist)
{
	list_del_init(plist);
}

__inline static _list *get_next(_list	*list)
{
	return list->next;
}
__inline static _list	*get_list_head(_queue *queue)
{
	return &(queue->queue);
}
#define rtw_list_first_entry(ptr, type, member) list_first_entry(ptr, type, member)

/* hlist */
typedef struct	hlist_head	rtw_hlist_head;
typedef struct	hlist_node	rtw_hlist_node;
#define rtw_hlist_for_each_entry(pos, head, member) hlist_for_each_entry(pos, head, member)
#define rtw_hlist_for_each_safe(pos, n, head) hlist_for_each_safe(pos, n, head)
#define rtw_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, head, member)
#else
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, np, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, node, head, member)
#endif

/* RCU */
typedef struct rcu_head rtw_rcu_head;
#define rtw_rcu_dereference(p) rcu_dereference((p))
#define rtw_rcu_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define rtw_rcu_assign_pointer(p, v) rcu_assign_pointer((p), (v))
#define rtw_rcu_read_lock() rcu_read_lock()
#define rtw_rcu_read_unlock() rcu_read_unlock()
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34))
#define rtw_rcu_access_pointer(p) rcu_access_pointer(p)
#endif

/* rhashtable */
#include "../os_dep/linux/rtw_rhashtable.h"


/*thread*/
typedef void *_thread_hdl_;
typedef int thread_return;
typedef void *thread_context;
struct thread_hdl {
	_thread_hdl_ thread_handler;
	u8 thread_status;
};
#define THREAD_STATUS_STARTED BIT(0)
#define THREAD_STATUS_STOPPED BIT(1)
#define RST_THREAD_STATUS(t)		((t)->thread_status = 0)
#define SET_THREAD_STATUS(t, s)		((t)->thread_status |= (s))
#define CLR_THREAD_STATUS(t, cl)	((t)->thread_status &= ~(cl))
#define CHK_THREAD_STATUS(t, ck)	((t)->thread_status & (ck))

typedef void timer_hdl_return;
typedef void *timer_hdl_context;

static inline void rtw_thread_enter(char *name)
{
	allow_signal(SIGTERM);
}

static inline void rtw_thread_exit(_completion *comp)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
	complete_and_exit(comp, 0);
#else
	kthread_complete_and_exit(comp, 0);
#endif
}

static inline _thread_hdl_ rtw_thread_start(int (*threadfn)(void *data),
			void *data, const char namefmt[])
{
	_thread_hdl_ _rtw_thread = NULL;

	_rtw_thread = kthread_run(threadfn, data, namefmt);
	if (IS_ERR(_rtw_thread)) {
		WARN_ON(1);
		_rtw_thread = NULL;
	}
	return _rtw_thread;
}
static inline bool rtw_thread_stop(_thread_hdl_ th)
{
	return kthread_stop(th);
}
static inline void rtw_thread_wait_stop(void)
{
	#if 0
	while (!kthread_should_stop())
		rtw_msleep_os(10);
	#else
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	#endif
}

static inline void flush_signals_thread(void)
{
	if (signal_pending(current))
		flush_signals(current);
}
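
/*
 * Illustrative kthread lifecycle built on the wrappers above (the thread
 * function and its argument are hypothetical):
 *
 *	static int my_thread(void *data)
 *	{
 *		while (!kthread_should_stop())
 *			... do work or sleep ...
 *		return 0;
 *	}
 *
 *	_thread_hdl_ th = rtw_thread_start(my_thread, padapter, "rtw_worker");
 *	...
 *	if (th)
 *		rtw_thread_stop(th);	(wakes the thread, waits for it to exit)
 */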


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#endif

typedef unsigned long systime;

/*tasklet*/
typedef struct tasklet_struct _tasklet;
typedef void (*tasklet_fn_t)(unsigned long);

#if 1
static inline void rtw_tasklet_init(_tasklet *t, tasklet_fn_t func,
							unsigned long data)
{
	tasklet_init(t, func, data);
}
static inline void rtw_tasklet_kill(_tasklet *t)
{
	tasklet_kill(t);
}

static inline void rtw_tasklet_schedule(_tasklet *t)
{
	tasklet_schedule(t);
}
static inline void rtw_tasklet_hi_schedule(_tasklet *t)
{
	tasklet_hi_schedule(t);
}
#else
#define rtw_tasklet_init tasklet_init
#define rtw_tasklet_kill tasklet_kill
#define rtw_tasklet_schedule tasklet_schedule
#define rtw_tasklet_hi_schedule tasklet_hi_schedule
#endif

/*skb_buffer*/
static inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline void _rtw_skb_free(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

static inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}

static inline int _rtw_skb_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

static inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
}
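
/*
 * Note: on kernels older than 2.6.36 the _rtw_pskb_copy() fallback uses
 * skb_clone(), which shares the packet data with the original instead of
 * giving the caller a private header copy, so the "copy" must be treated
 * as read-only on those kernels.
 */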

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
/* Ported from the Linux kernel, for compatibility with old kernels. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif
static inline u8 *rtw_skb_data(struct sk_buff *pkt)
{
	return pkt->data;
}

static inline u32 rtw_skb_len(struct sk_buff *pkt)
{
	return pkt->len;
}

static inline void *rtw_skb_put_zero(struct sk_buff *skb, unsigned int len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	return skb_put_zero(skb, len);
#else
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
#endif
}

/*timer*/
typedef struct rtw_timer_list _timer;
struct rtw_timer_list {
	struct timer_list timer;
	void (*function)(void *);
	void *arg;
};

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
static inline void timer_hdl(struct timer_list *in_timer)
{
	_timer *ptimer = from_timer(ptimer, in_timer, timer);

	ptimer->function(ptimer->arg);
}
#else
static inline void timer_hdl(unsigned long cntx)
{
	_timer *ptimer = (_timer *)cntx;

	ptimer->function(ptimer->arg);
}
#endif

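/*
 * Kernel 4.14 replaced the old (function, data) timer callback API with
 * timer_setup() plus from_timer(); struct rtw_timer_list and timer_hdl()
 * above bridge both styles so driver code can keep registering a plain
 * (callback, void *arg) pair through _init_timer() below.
 */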
__inline static void _init_timer(_timer *ptimer, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	timer_setup(&ptimer->timer, timer_hdl, 0);
#else
	/* setup_timer(ptimer, pfunc, (u32)cntx); */
	ptimer->timer.function = timer_hdl;
	ptimer->timer.data = (unsigned long)ptimer;
	init_timer(&ptimer->timer);
#endif
}

__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
	mod_timer(&ptimer->timer, (jiffies + (delay_time * HZ / 1000)));
}

__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	*bcancelled = del_timer_sync(&ptimer->timer) == 1 ? 1 : 0;
}

__inline static void _cancel_timer_async(_timer *ptimer)
{
	del_timer(&ptimer->timer);
}

/*work*/
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
typedef struct work_struct _workitem;
#else
typedef struct tq_struct _workitem;
#endif

static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
	INIT_WORK(pwork, pfunc);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	INIT_WORK(pwork, pfunc, pwork);
#else
	INIT_TQUEUE(pwork, pfunc, pwork);
#endif
}

__inline static void _set_workitem(_workitem *pwork)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	schedule_work(pwork);
#else
	schedule_task(pwork);
#endif
}

__inline static void _cancel_workitem_sync(_workitem *pwork)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	cancel_work_sync(pwork);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	flush_scheduled_work();
#else
	flush_scheduled_tasks();
#endif
}
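
/*
 * Kernel 2.6.20 dropped the extra data argument from INIT_WORK (the
 * callback now receives the work_struct itself); _init_workitem() above
 * hides that signature change from driver code.
 */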

#ifdef CONFIG_PHL_CPU_BALANCE
typedef struct rtw_work_struct _workitem_cpu;
struct rtw_work_struct {
	/* the _workitem must be the first member */
	_workitem wk;

	char work_name[32];
	struct workqueue_struct *pwkq;
	u8 cpu_id;
};

static inline void _config_workitem_cpu(_workitem_cpu *pwork, char *name, u8 cpu_id)
{
	pwork->cpu_id = cpu_id;
	/* bounded copy; strcpy here could overflow work_name[32] */
	snprintf(pwork->work_name, sizeof(pwork->work_name), "%s", name);
}

static inline void _init_workitem_cpu(_workitem_cpu *pwork, void *pfunc, void *cntx)
{
	INIT_WORK(&pwork->wk, pfunc);
	pwork->pwkq = alloc_workqueue(pwork->work_name, WQ_HIGHPRI, 0);
}

__inline static void _set_workitem_cpu(_workitem_cpu *pwork)
{
	queue_work_on(pwork->cpu_id, pwork->pwkq, &pwork->wk);
}

__inline static void _cancel_workitem_sync_cpu(_workitem_cpu *pwork)
{
	cancel_work_sync(&pwork->wk);
}
#endif /*CONFIG_PHL_CPU_BALANCE*/

/*
 * Global Mutex: can only be used at PASSIVE level.
 */
#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)                              \
	{                                                               \
		while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1) { \
			atomic_dec((atomic_t *)&(_MutexCounter));        \
			msleep(10);                          \
		}                                                           \
	}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)                              \
	{                                                               \
		atomic_dec((atomic_t *)&(_MutexCounter));        \
	}
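
/*
 * ACQUIRE_GLOBAL_MUTEX above implements a simple sleeping lock on a plain
 * integer counter: the first caller raises it from 0 to 1 and proceeds;
 * any contending caller undoes its increment and retries after 10 ms.
 * Illustrative use (the counter name is hypothetical):
 *
 *	static int drv_global_lock;
 *
 *	ACQUIRE_GLOBAL_MUTEX(drv_global_lock);
 *	... exclusive section, process context only ...
 *	RELEASE_GLOBAL_MUTEX(drv_global_lock);
 */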


typedef	struct	net_device *_nic_hdl;
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
#else
	return netif_queue_stopped(pnetdev);
#endif
}

#ifdef CONFIG_HWSIM
int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
#else
static inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(CONFIG_RTW_FC_FASTFWD)
	extern int fwdEngine_wifi_rx(struct sk_buff *skb);
	enum {
		RE8670_RX_STOP = 0,
		RE8670_RX_CONTINUE,
		RE8670_RX_STOP_SKBNOFREE,
		RE8670_RX_END
	};
	int ret = 0;

	skb->dev = ndev;
	/* expose the 14-byte Ethernet header to the forwarding engine */
	skb->data -= 14;
	skb->len += 14;

	ret = fwdEngine_wifi_rx(skb);

	if (ret == RE8670_RX_CONTINUE) {
		skb->data += 14;
		skb->len -= 14;
		return netif_rx(skb);
	} else if (ret == RE8670_RX_STOP) {
		kfree_skb(skb);
	}

	return 0;
#else
	skb->dev = ndev;
	return netif_rx(skb);
#endif
}
#endif

#ifdef CONFIG_RTW_NAPI
static inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
	skb->dev = ndev;
	return netif_receive_skb(skb);
}

#ifdef CONFIG_RTW_GRO
static inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	return napi_gro_receive(napi, skb);
}
#endif /* CONFIG_RTW_GRO */
#endif /* CONFIG_RTW_NAPI */

static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_wake_all_queues(pnetdev);
#else
	netif_wake_queue(pnetdev);
#endif
}

static inline void rtw_netif_start_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_start_all_queues(pnetdev);
#else
	netif_start_queue(pnetdev);
#endif
}

static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_stop_all_queues(pnetdev);
#else
	netif_stop_queue(pnetdev);
#endif
}
static inline void rtw_netif_device_attach(struct net_device *pnetdev)
{
	netif_device_attach(pnetdev);
}
static inline void rtw_netif_device_detach(struct net_device *pnetdev)
{
	netif_device_detach(pnetdev);
}
static inline void rtw_netif_carrier_on(struct net_device *pnetdev)
{
	netif_carrier_on(pnetdev);
}
static inline void rtw_netif_carrier_off(struct net_device *pnetdev)
{
	netif_carrier_off(pnetdev);
}

static inline int rtw_merge_string(char *dst, int dst_len, const char *src1, const char *src2)
{
	int	len = 0;

	/* scnprintf() returns the number of bytes actually written, so len
	 * can never run past dst_len even when the result is truncated. */
	len += scnprintf(dst + len, dst_len - len, "%s", src1);
	len += scnprintf(dst + len, dst_len - len, "%s", src2);

	return len;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
	#define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */


/* Suspend lock: prevents the system from suspending */
#ifdef CONFIG_WAKELOCK
	#include <linux/wakelock.h>
#elif defined(CONFIG_ANDROID_POWER)
	#include <linux/android_power.h>
#endif

/* limitation of path length */
#define PATH_LENGTH_MAX PATH_MAX

/* Atomic integer operations */
static inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
	atomic_set(v, i);
}

static inline int ATOMIC_READ(ATOMIC_T *v)
{
	return atomic_read(v);
}

static inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
	atomic_add(i, v);
}
static inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
	atomic_sub(i, v);
}

static inline void ATOMIC_INC(ATOMIC_T *v)
{
	atomic_inc(v);
}

static inline void ATOMIC_DEC(ATOMIC_T *v)
{
	atomic_dec(v);
}

static inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
	return atomic_add_return(i, v);
}

static inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
	return atomic_sub_return(i, v);
}

static inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
	return atomic_inc_return(v);
}

static inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
	return atomic_dec_return(v);
}

static inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
	return atomic_add_unless(v, 1, u);
#else
	/* inexact fallback: the value may transiently exceed u; this only
	 * ensures it does not exceed u once the call returns */
	if (ATOMIC_INC_RETURN(v) > u) {
		ATOMIC_DEC(v);
		return 0;
	}
	return 1;
#endif
}

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL)
#define FUNC_NDEV_FMT "%s(%s)"
#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? adapter->pnetdev->name : NULL)

#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
struct rtw_netdev_priv_indicator {
	void *priv;
	u32 sizeof_priv;
};
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(name)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(ndev->nd_net, name)
#else
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(dev_net(ndev), name)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(name)
#else
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(&init_net, name)
#endif

static inline void rtw_dump_stack(void)
{
	dump_stack();
}
#define rtw_bug_on(condition) BUG_ON(condition)
#define rtw_warn_on(condition) WARN_ON(condition)
#define RTW_DIV_ROUND_UP(n, d)	DIV_ROUND_UP(n, d)
#define rtw_sprintf(buf, size, format, arg...) snprintf(buf, size, format, ##arg)

#define STRUCT_PACKED __attribute__ ((packed))

#ifndef fallthrough
#if __GNUC__ >= 5 || defined(__clang__)
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#endif
#endif
#ifndef fallthrough
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
#define rtw_dev_addr_mod(dev, offset, addr, len) _rtw_memcpy(&dev->dev_addr[offset], addr, len)
#else
#define rtw_dev_addr_mod dev_addr_mod
#endif

#endif /* __OSDEP_LINUX_SERVICE_H_ */