/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __OSDEP_BSD_SERVICE_H_
#define __OSDEP_BSD_SERVICE_H_


#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/time.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/route.h>


#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"

#define USB_DEBUG_VAR rum_debug
#include <dev/usb/usb_debug.h>

#if 1 // Baron: ported from Linux; these are all temporary solutions and need review
#include <sys/sema.h>
#include <sys/pcpu.h> /* XXX for PCPU_GET */
// typedef struct semaphore _sema;
typedef struct sema _sema;
// typedef spinlock_t _lock;
typedef struct mtx _lock;
typedef struct mtx _mutex;
typedef struct rtw_timer_list _timer;
struct list_head {
	struct list_head *next, *prev;
};
struct __queue {
	struct list_head queue;
	_lock lock;
};

typedef struct mbuf _pkt;
typedef struct mbuf _buffer;

typedef struct __queue _queue;
typedef struct list_head _list;
typedef int _OS_STATUS;
//typedef u32 _irqL;
typedef unsigned long _irqL;
typedef struct ifnet * _nic_hdl;

typedef pid_t _thread_hdl_;
// typedef struct thread _thread_hdl_;
typedef void thread_return;
typedef void* thread_context;

typedef void timer_hdl_return;
typedef void* timer_hdl_context;
typedef struct work_struct _workitem;
typedef struct task _tasklet;

#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
/* emulate a modern version */
#define LINUX_VERSION_CODE KERNEL_VERSION(2, 6, 35)
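
/*
 * Because LINUX_VERSION_CODE is pinned to 2.6.35 above, version guards in
 * code ported from Linux keep compiling and always select the modern
 * branch.  Illustration only (not live code):
 */
#if 0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	/* 2.6.27+ code path: always taken under this emulation */
#else
	/* legacy code path: never compiled here */
#endif
#endif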

#define WIRELESS_EXT -1
#define HZ hz
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_lock_bh mtx_lock_irqsave
#define mtx_lock_irqsave(lock, x) mtx_lock(lock) //{local_irq_save((x)); mtx_lock_spin((lock));}
//#define IFT_RTW 0xf9 //ifnet allocate type for RTW
#define free_netdev if_free
#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
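
/*
 * Example (illustrative only; "struct example_entry" is hypothetical):
 * recovering the enclosing structure from an embedded list_head, the
 * usual pattern behind LIST_CONTAINOR/container_of.
 */
#if 0
struct example_entry {
	struct list_head list;
	int payload;
};

static inline struct example_entry *first_entry(_queue *q)
{
	/* q->queue.next is the embedded list_head of the first element */
	return LIST_CONTAINOR(q->queue.next, struct example_entry, list);
}
#endif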
/*
 * Linux timers are emulated using FreeBSD callout functions
 * (and taskqueue functionality).
 *
 * Currently no timer stats functionality.
 *
 * See (linux_compat) processes.c
 *
 */
struct rtw_timer_list {
	struct callout callout;
	void (*function)(void *);
	void *arg;
};

struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
/* Values for the state of an item of work (work_struct) */
typedef enum work_state {
	WORK_STATE_UNSET = 0,
	WORK_STATE_CALLOUT_PENDING = 1,
	WORK_STATE_TASK_PENDING = 2,
	WORK_STATE_WORK_CANCELLED = 3
} work_state_t;

struct work_struct {
	struct task task; /* FreeBSD task */
	work_state_t state; /* the pending or otherwise state of work. */
	work_func_t func;
};
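
/*
 * Sketch (not wired up): one way the work_struct emulation could be
 * completed with a FreeBSD taskqueue(9).  TASK_INIT, taskqueue_enqueue
 * and taskqueue_thread are real kernel interfaces (<sys/taskqueue.h>
 * would be needed); the trampoline and helper names below are
 * assumptions, since _init_workitem/_set_workitem further down are
 * still stubs.
 */
#if 0
static void
example_work_trampoline(void *context, int pending)
{
	struct work_struct *work = context;

	work->state = WORK_STATE_UNSET;
	work->func(work);
}

static inline void
example_init_work(struct work_struct *work, work_func_t func)
{
	work->func = func;
	work->state = WORK_STATE_UNSET;
	TASK_INIT(&work->task, 0, example_work_trampoline, work);
}

static inline void
example_queue_work(struct work_struct *work)
{
	work->state = WORK_STATE_TASK_PENDING;
	taskqueue_enqueue(taskqueue_thread, &work->task);
}
#endif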
#define spin_unlock_irqrestore mtx_unlock_irqrestore
#define spin_unlock_bh mtx_unlock_irqrestore
#define mtx_unlock_irqrestore(lock, x) mtx_unlock(lock)
extern void _rtw_spinlock_init(_lock *plock);

// private structures modified to match FreeBSD
#define BITS_PER_LONG 32
union ktime {
	s64 tv64;
#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
	struct {
#ifdef __BIG_ENDIAN
		s32 sec, nsec;
#else
		s32 nsec, sec;
#endif
	} tv;
#endif
};
#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define CHECKSUM_NONE 0
typedef unsigned char *sk_buff_data_t;
typedef union ktime ktime_t; /* Kill this */

void rtw_mtx_lock(_lock *plock);

void rtw_mtx_unlock(_lock *plock);

/**
 * struct sk_buff - socket buffer
 * @next: Next buffer in list
 * @prev: Previous buffer in list
 * @sk: Socket we are owned by
 * @tstamp: Time we arrived
 * @dev: Device we arrived on/are leaving by
 * @transport_header: Transport layer header
 * @network_header: Network layer header
 * @mac_header: Link layer header
 * @_skb_refdst: destination entry (with norefcount bit)
 * @sp: the security path, used for xfrm
 * @cb: Control buffer. Free for use by every layer. Put private vars here
 * @len: Length of actual data
 * @data_len: Data length
 * @mac_len: Length of link layer header
 * @hdr_len: writable header length of cloned skb
 * @csum: Checksum (must include start/offset pair)
 * @csum_start: Offset from skb->head where checksumming should start
 * @csum_offset: Offset from csum_start where checksum should be stored
 * @local_df: allow local fragmentation
 * @cloned: Head may be cloned (check refcnt to be sure)
 * @nohdr: Payload reference only, must not modify header
 * @pkt_type: Packet class
 * @fclone: skbuff clone status
 * @ip_summed: Driver fed us an IP checksum
 * @priority: Packet queueing priority
 * @users: User count - see {datagram,tcp}.c
 * @protocol: Packet protocol from driver
 * @truesize: Buffer size
 * @head: Head of buffer
 * @data: Data head pointer
 * @tail: Tail pointer
 * @end: End pointer
 * @destructor: Destruct function
 * @mark: Generic packet mark
 * @nfct: Associated connection, if any
 * @ipvs_property: skbuff is owned by ipvs
 * @peeked: this packet has been seen already, so stats have been
 *	done for it, don't do them again
 * @nf_trace: netfilter packet trace flag
 * @nfctinfo: Relationship of this skb to the connection
 * @nfct_reasm: netfilter conntrack re-assembly pointer
 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 * @skb_iif: ifindex of device we arrived on
 * @rxhash: the packet hash computed on receive
 * @queue_mapping: Queue mapping for multiqueue devices
 * @tc_index: Traffic control index
 * @tc_verd: traffic control verdict
 * @ndisc_nodetype: router type (from link layer)
 * @dma_cookie: a cookie to one of several possible DMA operations
 *	done by skb DMA functions
 * @secmark: security marking
 * @vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff *next;
	struct sk_buff *prev;

	ktime_t tstamp;

	struct sock *sk;
	//struct net_device *dev;
	struct ifnet *dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char cb[48] __aligned(8);

	unsigned long _skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path *sp;
#endif
	unsigned int len,
		     data_len;
	u16 mac_len,
	    hdr_len;
	union {
		u32 csum;
		struct {
			u16 csum_start;
			u16 csum_offset;
		} smbol2;
	} smbol1;
	u32 priority;
	kmemcheck_bitfield_begin(flags1);
	u8 local_df:1,
	   cloned:1,
	   ip_summed:2,
	   nohdr:1,
	   nfctinfo:3;
	u8 pkt_type:3,
	   fclone:2,
	   ipvs_property:1,
	   peeked:1,
	   nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	u16 protocol;

	void (*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack *nfct;
	struct sk_buff *nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info *nf_bridge;
#endif

	int skb_iif;
#ifdef CONFIG_NET_SCHED
	u16 tc_index; /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	u16 tc_verd; /* traffic control verdict */
#endif
#endif

	u32 rxhash;

	kmemcheck_bitfield_begin(flags2);
	u16 queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	u8 ndisc_nodetype:2,
	   deliver_no_wcard:1;
#else
	u8 deliver_no_wcard:1;
#endif
	kmemcheck_bitfield_end(flags2);

	/* 0/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	u32 secmark;
#endif
	union {
		u32 mark;
		u32 dropcount;
	} symbol3;

	u16 vlan_tci;

	sk_buff_data_t transport_header;
	sk_buff_data_t network_header;
	sk_buff_data_t mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t tail;
	sk_buff_data_t end;
	unsigned char *head,
		      *data;
	unsigned int truesize;
	atomic_t users;
};
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff *next;
	struct sk_buff *prev;

	u32 qlen;
	_lock lock;
};
#define skb_tail_pointer(skb) ((skb)->tail)
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	//SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	return tmp;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	if (skb->len < skb->data_len)
		printf("%s(),%d,error!\n", __FUNCTION__, __LINE__);
	return skb->data += len;
}
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
#ifdef PLATFORM_FREEBSD
	return __skb_pull(skb, len);
#else
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
#endif //PLATFORM_FREEBSD
}
static inline u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}
static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}
static inline void skb_queue_tail(struct sk_buff_head *list,
				  struct sk_buff *newsk)
{
	mtx_lock(&list->lock);
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
	mtx_unlock(&list->lock);
}
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	mtx_lock(&list->lock);
	skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	mtx_unlock(&list->lock);

	return skb;
}
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	_rtw_spinlock_init(&list->lock);
	__skb_queue_head_init(list);
}
unsigned long copy_from_user(void *to, const void *from, unsigned long n);
unsigned long copy_to_user(void *to, const void *from, unsigned long n);
struct sk_buff * dev_alloc_skb(unsigned int size);
struct sk_buff *skb_clone(const struct sk_buff *skb);
void dev_kfree_skb_any(struct sk_buff *skb);
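
/*
 * Usage sketch for the emulated skb API above (illustrative only; the
 * queue, headroom size and u8 frame buffer are made up).  All helpers
 * used here are declared or defined in this header.
 */
#if 0
static void example_rx(struct sk_buff_head *rxq, const u8 *frame, u32 len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(len + 32);
	if (skb == NULL)
		return;
	skb_reserve(skb, 32);            /* headroom for later headers */
	memcpy(skb_put(skb, len), frame, len);
	skb_queue_tail(rxq, skb);        /* locked enqueue */

	while ((skb = skb_dequeue(rxq)) != NULL)
		dev_kfree_skb_any(skb);
}
#endif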
#endif // Baron: ported from Linux; temporary solutions pending review


#if 1 // kenny: Linux compatibility code for the Linux USB driver
#include <dev/usb/usb_compat_linux.h>

#define __init		// __attribute ((constructor))
#define __exit		// __attribute ((destructor))

/*
 * Definitions for module_init and module_exit macros.
 *
 * These macros will use the SYSINIT framework to call a specified
 * function (with no arguments) on module loading or unloading.
 *
 */

void module_init_exit_wrapper(void *arg);

#define module_init(initfn) \
	SYSINIT(mod_init_ ## initfn, \
		SI_SUB_KLD, SI_ORDER_FIRST, \
		module_init_exit_wrapper, initfn)

#define module_exit(exitfn) \
	SYSUNINIT(mod_exit_ ## exitfn, \
		SI_SUB_KLD, SI_ORDER_ANY, \
		module_init_exit_wrapper, exitfn)

/*
 * The usb_register and usb_deregister functions are used to register
 * usb drivers with the usb subsystem.
 */
int usb_register(struct usb_driver *driver);
int usb_deregister(struct usb_driver *driver);
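
/*
 * Combined sketch: a hypothetical driver pairing module_init/module_exit
 * with usb_register/usb_deregister.  "example_usb_driver" and the two
 * functions do not exist in this driver; they only illustrate how the
 * SYSINIT-based macros above drive the USB registration calls.
 */
#if 0
static struct usb_driver example_usb_driver;

static void example_driver_init(void)
{
	usb_register(&example_usb_driver);
}

static void example_driver_exit(void)
{
	usb_deregister(&example_usb_driver);
}

module_init(example_driver_init);
module_exit(example_driver_exit);
#endif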

/*
 * usb_get_dev and usb_put_dev - increment/decrement the reference count
 * of the usb device structure.
 *
 * Original body of usb_get_dev:
 *
 *	if (dev)
 *		get_device(&dev->dev);
 *	return dev;
 *
 * Reference counts are not currently used in this compatibility
 * layer. So these functions will do nothing.
 */
static inline struct usb_device *
usb_get_dev(struct usb_device *dev)
{
	return dev;
}

static inline void
usb_put_dev(struct usb_device *dev)
{
	return;
}


// rtw_usb_compat_linux
int rtw_usb_submit_urb(struct urb *urb, uint16_t mem_flags);
int rtw_usb_unlink_urb(struct urb *urb);
int rtw_usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe);
int rtw_usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe,
    uint8_t request, uint8_t requesttype,
    uint16_t value, uint16_t index, void *data,
    uint16_t size, usb_timeout_t timeout);
int rtw_usb_set_interface(struct usb_device *dev, uint8_t iface_no, uint8_t alt_index);
int rtw_usb_setup_endpoint(struct usb_device *dev,
    struct usb_host_endpoint *uhe, usb_size_t bufsize);
struct urb *rtw_usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags);
struct usb_host_endpoint *rtw_usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep);
struct usb_host_interface *rtw_usb_altnum_to_altsetting(const struct usb_interface *intf, uint8_t alt_index);
struct usb_interface *rtw_usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no);
void *rtw_usbd_get_intfdata(struct usb_interface *intf);
void rtw_usb_linux_register(void *arg);
void rtw_usb_linux_deregister(void *arg);
void rtw_usb_linux_free_device(struct usb_device *dev);
void rtw_usb_free_urb(struct urb *urb);
void rtw_usb_init_urb(struct urb *urb);
void rtw_usb_kill_urb(struct urb *urb);
void rtw_usb_set_intfdata(struct usb_interface *intf, void *data);
void rtw_usb_fill_bulk_urb(struct urb *urb, struct usb_device *udev,
    struct usb_host_endpoint *uhe, void *buf,
    int length, usb_complete_t callback, void *arg);
int rtw_usb_bulk_msg(struct usb_device *udev, struct usb_host_endpoint *uhe,
    void *data, int len, uint16_t *pactlen, usb_timeout_t timeout);
void *usb_get_intfdata(struct usb_interface *intf);
int usb_linux_init_endpoints(struct usb_device *udev);



typedef struct urb *PURB;

typedef unsigned gfp_t;
#define __GFP_WAIT	((gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((gfp_t)0x400u)	/* Retry the allocation.  Might fail */
#define __GFP_NOFAIL	((gfp_t)0x800u)	/* Retry for ever.  Cannot fail */
#define __GFP_NORETRY	((gfp_t)0x1000u)/* Do not retry.  Might fail */
#define __GFP_NO_GROW	((gfp_t)0x2000u)/* Slab internal usage */
#define __GFP_COMP	((gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO	((gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
/* Was missing here; use the Linux value so GFP_HIGHUSER expands (unused on FreeBSD) */
#define __GFP_HIGHMEM	((gfp_t)0x02u)

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)

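/*
 * Note on the gfp flags above: on FreeBSD the usual translation is to
 * malloc(9) flags -- non-sleeping (atomic) requests map to M_NOWAIT and
 * sleeping ones to M_WAITOK.  A minimal sketch of such a mapping
 * (assumption; this helper is not part of the driver):
 */
#if 0
static inline void *
example_kmalloc(size_t size, gfp_t flags)
{
	int mflags = (flags & __GFP_WAIT) ? M_WAITOK : M_NOWAIT;

	return malloc(size, M_DEVBUF, mflags);
}
#endif
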

#endif // kenny: Linux compatibility code for the Linux USB driver

__inline static _list *get_next(_list *list)
{
	return list->next;
}

__inline static _list *get_list_head(_queue *queue)
{
	return (&(queue->queue));
}


#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))


__inline static void _enter_critical(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_lock_bh(plock, *pirqL);
}

__inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_unlock_bh(plock, *pirqL);
}

__inline static void _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	mtx_lock(pmutex);
}

__inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	mtx_unlock(pmutex);
}
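
/*
 * Usage sketch for the critical-section wrappers above (illustrative
 * only): protecting a _queue with its embedded lock.  On FreeBSD the
 * _irqL value is effectively unused, since the wrappers reduce to
 * mtx_lock/mtx_unlock.
 */
#if 0
static inline void example_queue_touch(_queue *q)
{
	_irqL irqL;

	_enter_critical_bh(&q->lock, &irqL);
	/* list manipulation on q->queue would go here */
	_exit_critical_bh(&q->lock, &irqL);
}
#endif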
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
__inline static void rtw_list_delete(_list *plist)
{
	__list_del(plist->prev, plist->next);
	INIT_LIST_HEAD(plist);
}

static inline void timer_hdl(void *ctx)
{
	_timer *timer = (_timer *)ctx;

	rtw_mtx_lock(NULL);
	if (callout_pending(&timer->callout)) {
		/* callout was reset */
		rtw_mtx_unlock(NULL);
		return;
	}

	if (!callout_active(&timer->callout)) {
		/* callout was stopped */
		rtw_mtx_unlock(NULL);
		return;
	}

	callout_deactivate(&timer->callout);

	timer->function(timer->arg);

	rtw_mtx_unlock(NULL);
}

static inline void _init_timer(_timer *ptimer, _nic_hdl padapter, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;
	callout_init(&ptimer->callout, CALLOUT_MPSAFE);
}

__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
	if (ptimer->function && ptimer->arg) {
		rtw_mtx_lock(NULL);
		callout_reset(&ptimer->callout, delay_time, timer_hdl, ptimer);
		rtw_mtx_unlock(NULL);
	}
}

__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	rtw_mtx_lock(NULL);
	callout_drain(&ptimer->callout);
	rtw_mtx_unlock(NULL);
	*bcancelled = 1; /* assume a pending timer was cancelled */
}
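
/*
 * Timer lifecycle sketch using the helpers above (illustrative only; the
 * callback and adapter handle are hypothetical).  Note that _set_timer
 * passes delay_time straight to callout_reset(9), so delays are in ticks,
 * and that it only arms the callout when the context argument is
 * non-NULL.
 */
#if 0
static void example_timeout(void *ctx)
{
	/* runs from timer_hdl() via the callout */
}

static void example_timer_use(_nic_hdl padapter)
{
	static _timer tm;
	u8 cancelled;

	_init_timer(&tm, padapter, example_timeout, padapter);
	_set_timer(&tm, hz / 10);       /* fire in ~100 ms */
	_cancel_timer(&tm, &cancelled); /* drain before teardown */
}
#endif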

__inline static void _init_workitem(_workitem *pwork, void *pfunc, PVOID cntx)
{
	printf("%s: not implemented yet!\n", __FUNCTION__);
}

__inline static void _set_workitem(_workitem *pwork)
{
	printf("%s: not implemented yet!\n", __FUNCTION__);
	// schedule_work(pwork);
}

//
// Global Mutex: can only be used at PASSIVE level.
//

#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)	\
{						\
}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)	\
{						\
}

#define ATOMIC_INIT(i)  { (i) }

static __inline void thread_enter(char *name);

// Atomic integer operations
typedef uint32_t ATOMIC_T;
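
/*
 * With ATOMIC_T as a plain uint32_t, the FreeBSD atomic(9) primitives
 * from <machine/atomic.h> (already included above) can back the usual
 * helpers.  Sketch only; the example_* names are not part of the driver:
 */
#if 0
static inline void example_atomic_inc(ATOMIC_T *v)
{
	atomic_add_32(v, 1);
}

static inline uint32_t example_atomic_read(ATOMIC_T *v)
{
	return atomic_load_acq_32(v);
}
#endif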

#define rtw_netdev_priv(netdev) (((struct ifnet *)netdev)->if_softc)

#define rtw_free_netdev(netdev) if_free((netdev))

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ""
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) ""
#define FUNC_NDEV_FMT "%s"
#define FUNC_NDEV_ARG(ndev) __func__
#define FUNC_ADPT_FMT "%s"
#define FUNC_ADPT_ARG(adapter) __func__

#define STRUCT_PACKED

#endif /* __OSDEP_BSD_SERVICE_H_ */
