1 /******************************************************************************
2 *
3 * Copyright(c) 2007 - 2017 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15 #ifndef __OSDEP_LINUX_SERVICE_H_
16 #define __OSDEP_LINUX_SERVICE_H_
17
18 #include <linux/version.h>
19 #include <linux/spinlock.h>
20 #include <linux/compiler.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/namei.h>
27 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
28 #include <linux/kref.h>
29 #endif
30 /* #include <linux/smp_lock.h> */
31 #include <linux/netdevice.h>
32 #include <linux/inetdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/circ_buf.h>
35 #include <asm/uaccess.h>
36 #include <asm/byteorder.h>
37 #include <asm/atomic.h>
38 #include <asm/io.h>
39 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
40 #include <asm/semaphore.h>
41 #else
42 #include <linux/semaphore.h>
43 #endif
44 #include <linux/sem.h>
45 #include <linux/sched.h>
46 #include <linux/etherdevice.h>
47 #include <linux/wireless.h>
48 #include <net/iw_handler.h>
49 #include <net/addrconf.h>
50 #include <linux/if_arp.h>
51 #include <linux/rtnetlink.h>
52 #include <linux/delay.h>
53 #include <linux/interrupt.h> /* for struct tasklet_struct */
54 #include <linux/ip.h>
55 #include <linux/kthread.h>
56 #include <linux/list.h>
57 #include <linux/vmalloc.h>
58
59 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
60 #include <uapi/linux/sched/types.h>
61 #endif
62
63 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 5, 41))
64 #include <linux/tqueue.h>
65 #endif
66
67 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
68 #include <uapi/linux/limits.h>
69 #else
70 #include <linux/limits.h>
71 #endif
72
73 #ifdef RTK_DMP_PLATFORM
74 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
75 #include <linux/pageremap.h>
76 #endif
77 #include <asm/io.h>
78 #endif
79
80 #ifdef CONFIG_NET_RADIO
81 #define CONFIG_WIRELESS_EXT
82 #endif
83
84 /* Monitor mode */
85 #include <net/ieee80211_radiotap.h>
86
87 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
88 #include <linux/ieee80211.h>
89 #endif
90
91 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && \
92 LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
93 #define CONFIG_IEEE80211_HT_ADDT_INFO
94 #endif
95
96 #ifdef CONFIG_IOCTL_CFG80211
97 /* #include <linux/ieee80211.h> */
98 #include <net/cfg80211.h>
99 #endif /* CONFIG_IOCTL_CFG80211 */
100
101
102 #ifdef CONFIG_HAS_EARLYSUSPEND
103 #include <linux/earlysuspend.h>
104 #endif /* CONFIG_HAS_EARLYSUSPEND */
105
106 #ifdef CONFIG_EFUSE_CONFIG_FILE
107 #include <linux/fs.h>
108 #endif
109
110 #ifdef CONFIG_USB_HCI
111 #include <linux/usb.h>
112 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
113 #include <linux/usb_ch9.h>
114 #else
115 #include <linux/usb/ch9.h>
116 #endif
117 #endif
118
119 #ifdef CONFIG_BT_COEXIST_SOCKET_TRX
120 #include <net/sock.h>
121 #include <net/tcp.h>
122 #include <linux/udp.h>
123 #include <linux/in.h>
124 #include <linux/netlink.h>
125 #endif /* CONFIG_BT_COEXIST_SOCKET_TRX */
126
127 #ifdef CONFIG_USB_HCI
128 typedef struct urb *PURB;
129 #endif
130
131 #if defined(CONFIG_RTW_GRO) && (!defined(CONFIG_RTW_NAPI))
132
133 #error "Enable NAPI before enable GRO\n"
134
135 #endif
136
137
138 #if (KERNEL_VERSION(2, 6, 29) > LINUX_VERSION_CODE && defined(CONFIG_RTW_NAPI))
139
140 #undef CONFIG_RTW_NAPI
141 /*#warning "Linux Kernel version too old to support NAPI (should newer than 2.6.29)\n"*/
142
143 #endif
144
145 #if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE && defined(CONFIG_RTW_GRO))
146
147 #undef CONFIG_RTW_GRO
148 /*#warning "Linux Kernel version too old to support GRO(should newer than 2.6.33)\n"*/
149
150 #endif
151
/* OS-abstraction typedefs: map driver-generic names onto Linux kernel types. */
typedef struct semaphore _sema;
typedef spinlock_t _lock;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
typedef struct mutex _mutex;
#else
/* Pre-2.6.37 kernels have no struct mutex: use a semaphore as a binary mutex. */
typedef struct semaphore _mutex;
#endif

/*
 * Timer wrapper: carries the driver callback and its context alongside the
 * kernel timer so one common dispatcher (timer_hdl below) can service both
 * the pre- and post-4.14 kernel timer callback signatures.
 */
struct rtw_timer_list {
	struct timer_list timer;
	void (*function)(void *);	/* driver callback invoked on expiry */
	void *arg;			/* opaque context handed to 'function' */
};

typedef struct rtw_timer_list _timer;
typedef struct completion _completion;

/* A linked list protected by its own spinlock. */
struct __queue {
	struct list_head queue;
	_lock lock;
};

typedef struct sk_buff _pkt;	/* network packet */
typedef unsigned char _buffer;

typedef struct __queue _queue;
typedef struct list_head _list;
178
/* hlist */
typedef struct hlist_head rtw_hlist_head;
typedef struct hlist_node rtw_hlist_node;

/* RCU: thin pass-through wrappers over the kernel RCU primitives. */
typedef struct rcu_head rtw_rcu_head;
#define rtw_rcu_dereference(p) rcu_dereference((p))
#define rtw_rcu_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define rtw_rcu_assign_pointer(p, v) rcu_assign_pointer((p), (v))
#define rtw_rcu_read_lock() rcu_read_lock()
#define rtw_rcu_read_unlock() rcu_read_unlock()
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34))
/* rcu_access_pointer() only exists from 2.6.34 onward. */
#define rtw_rcu_access_pointer(p) rcu_access_pointer(p)
#endif

/* rhashtable */
#include "../os_dep/linux/rtw_rhashtable.h"

typedef int _OS_STATUS;
/* typedef u32 _irqL; */
typedef unsigned long _irqL;	/* saved IRQ flags for spin_lock_irqsave() */
typedef struct net_device *_nic_hdl;

typedef void *_thread_hdl_;
typedef int thread_return;
typedef void *thread_context;

typedef void timer_hdl_return;
typedef void *timer_hdl_context;

/* Work items: task queues were replaced by workqueues after 2.5.41. */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
typedef struct work_struct _workitem;
#else
typedef struct tq_struct _workitem;
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
/* DMA_BIT_MASK() appeared in 2.6.24; provide it for older kernels. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#endif

typedef unsigned long systime;	/* timestamp, in jiffies */
typedef struct tasklet_struct _tasklet;
221
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
/*
 * Porting from linux kernel, for compatible with old kernel.
 * Before 2.6.22 sk_buff had no tail/end accessor helpers (tail/end were
 * plain pointers); provide the accessors so the rest of the driver can
 * use the modern API unconditionally.
 */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif
244
/**
 * rtw_list_delete - unlink @plist from whatever list it is on.
 *
 * Uses list_del_init() so the entry is left self-linked: afterwards
 * list_empty(plist) is true and the entry may safely be deleted again.
 */
static inline void rtw_list_delete(_list *plist)
{
	list_del_init(plist);
}
249
/**
 * get_next - return the list element that follows @list.
 */
static inline _list *get_next(_list *list)
{
	return list->next;
}
254
/*
 * Container-of for list entries: recover the enclosing structure from a
 * pointer to its 'member' field (same idea as the kernel's container_of()).
 * NOTE(review): SIZE_T is presumably defined by the driver's basic-types
 * header - confirm before reusing this macro outside this driver.
 */
#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))

#define rtw_list_first_entry(ptr, type, member) list_first_entry(ptr, type, member)

/* hlist wrappers: kernel 3.9 dropped the extra node-iterator argument from
 * the hlist_for_each_entry*() family; these macros paper over both forms. */
#define rtw_hlist_for_each_entry(pos, head, member) hlist_for_each_entry(pos, head, member)
#define rtw_hlist_for_each_safe(pos, n, head) hlist_for_each_safe(pos, n, head)
#define rtw_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, head, member)
#else
#define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, np, n, head, member)
#define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, node, head, member)
#endif
270
/* IRQ-safe critical section: disables local interrupts, saving flags in *pirqL. */
__inline static void _enter_critical(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

/* Leave an IRQ-safe critical section, restoring the flags saved in *pirqL. */
__inline static void _exit_critical(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

/* Identical to _enter_critical; kept as a separate name for call-site symmetry. */
__inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

/* Identical to _exit_critical; kept as a separate name for call-site symmetry. */
__inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

/* Bottom-half-safe critical section (pirqL is unused; kept for API symmetry). */
__inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_lock_bh(plock);
}

/* Leave a bottom-half-safe critical section (pirqL unused). */
__inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_unlock_bh(plock);
}

/* Bottom-half-safe lock, variant without the (unused) flags argument. */
__inline static void enter_critical_bh(_lock *plock)
{
	spin_lock_bh(plock);
}

/* Bottom-half-safe unlock, variant without the (unused) flags argument. */
__inline static void exit_critical_bh(_lock *plock)
{
	spin_unlock_bh(plock);
}
310
/*
 * Acquire a driver mutex, interruptibly.
 * Returns 0 on success or a negative value if interrupted by a signal.
 * pirqL is unused; it exists only for signature symmetry with the
 * spinlock wrappers above.
 */
__inline static int _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	/* mutex_lock(pmutex); */
	ret = mutex_lock_interruptible(pmutex);
#else
	ret = down_interruptible(pmutex);
#endif
	return ret;
}
322
323
/*
 * Acquire a driver mutex, uninterruptibly.
 * Always returns 0 - the return value exists only so this function has
 * the same shape as _enter_critical_mutex().
 */
__inline static int _enter_critical_mutex_lock(_mutex *pmutex, _irqL *pirqL)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_lock(pmutex);
#else
	down(pmutex);
#endif
	return ret;
}
334
/* Release a driver mutex taken by one of the _enter_critical_mutex* calls
 * (pirqL unused; kept for API symmetry). */
__inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_unlock(pmutex);
#else
	up(pmutex);
#endif
}
343
get_list_head(_queue * queue)344 __inline static _list *get_list_head(_queue *queue)
345 {
346 return &(queue->queue);
347 }
348
349 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
timer_hdl(struct timer_list * in_timer)350 static inline void timer_hdl(struct timer_list *in_timer)
351 #else
352 static inline void timer_hdl(unsigned long cntx)
353 #endif
354 {
355 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
356 _timer *ptimer = from_timer(ptimer, in_timer, timer);
357 #else
358 _timer *ptimer = (_timer *)cntx;
359 #endif
360 ptimer->function(ptimer->arg);
361 }
362
/*
 * Initialize a driver timer: record the driver callback and its context,
 * then set up the kernel timer to dispatch through timer_hdl().
 * nic_hdl is unused; kept for API compatibility with other OS ports.
 */
__inline static void _init_timer(_timer *ptimer, _nic_hdl nic_hdl, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	timer_setup(&ptimer->timer, timer_hdl, 0);
#else
	/* setup_timer(ptimer, pfunc,(u32)cntx); */
	/* Old-style setup: timer_hdl receives the wrapper via timer.data. */
	ptimer->timer.function = timer_hdl;
	ptimer->timer.data = (unsigned long)ptimer;
	init_timer(&ptimer->timer);
#endif
}
377
/*
 * (Re)arm @ptimer to fire @delay_time milliseconds from now.
 * NOTE(review): delay_time * HZ can overflow u32 for very large delays,
 * and the /1000 truncates on kernels where HZ is not a multiple of 1000;
 * msecs_to_jiffies() would be the safer conversion - confirm callers'
 * delay ranges before changing.
 */
__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
	mod_timer(&ptimer->timer , (jiffies + (delay_time * HZ / 1000)));
}
382
/**
 * _cancel_timer - stop @ptimer and wait for any running handler to finish.
 * @bcancelled: set to 1 when a pending (armed) timer was deactivated,
 *              0 when the timer was not pending.
 */
static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	*bcancelled = (del_timer_sync(&ptimer->timer) != 0);
}
387
/**
 * _cancel_timer_async - stop @ptimer without waiting for a running
 * handler to complete (asynchronous counterpart of _cancel_timer).
 */
static inline void _cancel_timer_async(_timer *ptimer)
{
	del_timer(&ptimer->timer);
}
392
/*
 * Initialize a work item with handler @pfunc.
 * cntx is unused: 2.6.20+ INIT_WORK() passes the work_struct itself to the
 * handler, and the older variants here pass pwork explicitly.
 */
static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
	INIT_WORK(pwork, pfunc);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	INIT_WORK(pwork, pfunc, pwork);
#else
	INIT_TQUEUE(pwork, pfunc, pwork);
#endif
}
403
/* Queue @pwork for execution on the kernel-global work queue. */
__inline static void _set_workitem(_workitem *pwork)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	schedule_work(pwork);
#else
	schedule_task(pwork);
#endif
}
412
/*
 * Cancel @pwork and wait for it to finish if it is running.
 * Pre-2.6.22 kernels have no per-item cancel, so the whole scheduled
 * work/task queue is flushed instead.
 */
__inline static void _cancel_workitem_sync(_workitem *pwork)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	cancel_work_sync(pwork);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	flush_scheduled_work();
#else
	flush_scheduled_tasks();
#endif
}
423 /*
424 * Global Mutex: can only be used at PASSIVE level.
425 * */
426
427 #define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
428 { \
429 while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1) { \
430 atomic_dec((atomic_t *)&(_MutexCounter)); \
431 msleep(10); \
432 } \
433 }
434
435 #define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
436 { \
437 atomic_dec((atomic_t *)&(_MutexCounter)); \
438 }
439
/*
 * Return non-zero when transmit is stopped on @pnetdev.
 * On multi-queue kernels (>= 2.6.35) this checks tx queues 0-3 and only
 * reports "stopped" when all four are stopped - it assumes the device was
 * registered with (at least) 4 tx queues.
 */
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
#else
	return netif_queue_stopped(pnetdev);
#endif
}
451
/* Wake all tx queues (single-queue API on kernels before 2.6.35). */
static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_wake_all_queues(pnetdev);
#else
	netif_wake_queue(pnetdev);
#endif
}

/* Start all tx queues (single-queue API on kernels before 2.6.35). */
static inline void rtw_netif_start_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_start_all_queues(pnetdev);
#else
	netif_start_queue(pnetdev);
#endif
}

/* Stop all tx queues (single-queue API on kernels before 2.6.35). */
static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_stop_all_queues(pnetdev);
#else
	netif_stop_queue(pnetdev);
#endif
}
/* Mark the device as present/usable again (e.g. after resume). */
static inline void rtw_netif_device_attach(struct net_device *pnetdev)
{
	netif_device_attach(pnetdev);
}
/* Mark the device as removed/unusable (e.g. before suspend). */
static inline void rtw_netif_device_detach(struct net_device *pnetdev)
{
	netif_device_detach(pnetdev);
}
/* Report link up to the network stack. */
static inline void rtw_netif_carrier_on(struct net_device *pnetdev)
{
	netif_carrier_on(pnetdev);
}
/* Report link down to the network stack. */
static inline void rtw_netif_carrier_off(struct net_device *pnetdev)
{
	netif_carrier_off(pnetdev);
}
494
/*
 * Concatenate @src1 and @src2 into @dst (at most @dst_len bytes, always
 * NUL-terminated). Returns the number of characters actually stored,
 * excluding the terminating NUL; returns 0 for a NULL/empty destination.
 *
 * Fix: the original added snprintf()'s *would-be* length to len, so when
 * src1 alone overflowed dst, len exceeded dst_len and the second call
 * received dst + len (past the buffer) with a negative size that converts
 * to a huge size_t - out-of-bounds write. Clamp len after each snprintf.
 */
static inline int rtw_merge_string(char *dst, int dst_len, const char *src1, const char *src2)
{
	int len = 0;

	if (dst == NULL || dst_len <= 0)
		return 0;

	len += snprintf(dst, dst_len, "%s", src1);
	if (len >= dst_len)
		len = dst_len - 1;	/* snprintf returned the untruncated length */
	len += snprintf(dst + len, dst_len - len, "%s", src2);
	if (len >= dst_len)
		len = dst_len - 1;

	return len;
}
503
/* Send a signal to a process; pid-namespace aware from 2.6.27 onward. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */


/* Suspend lock prevent system from going suspend */
#ifdef CONFIG_WAKELOCK
#include <linux/wakelock.h>
#elif defined(CONFIG_ANDROID_POWER)
#include <linux/android_power.h>
#endif

/* limitation of path length */
#define PATH_LENGTH_MAX PATH_MAX

/* Atomic integer operations */
#define ATOMIC_T atomic_t

/* Fetch the driver's real private data hung off a net_device; see
 * struct rtw_netdev_priv_indicator below for the indirection. */
#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)

/* printf format/argument helper pairs for identifying a net_device or
 * adapter in log messages. */
#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL)
#define FUNC_NDEV_FMT "%s(%s)"
#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? adapter->pnetdev->name : NULL)
534
/*
 * Stored in netdev_priv(): an indirection record pointing at the driver's
 * actual private data block, which may be allocated separately or inherited
 * from an earlier net_device (see rtw_alloc_etherdev_with_old_priv).
 */
struct rtw_netdev_priv_indicator {
	void *priv;		/* the adapter's real private data */
	u32 sizeof_priv;	/* size of the area 'priv' points to */
};
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);

/* dev_get_by_name() lookup in the same net namespace as @ndev; the kernel
 * API gained a namespace argument in 2.6.24 and dev_net() in 2.6.26. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(name)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(ndev->nd_net, name)
#else
#define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(dev_net(ndev), name)
#endif

/* dev_get_by_name() lookup in the initial net namespace. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(name)
#else
#define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(&init_net, name)
#endif

#define STRUCT_PACKED __attribute__ ((packed))
557
558
559 #endif /* __OSDEP_LINUX_SERVICE_H_ */
560