/******************************************************************************
 *
 * Copyright(c) 2007 - 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __OSDEP_BSD_SERVICE_H_
#define __OSDEP_BSD_SERVICE_H_


#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/proc.h>	/* for struct proc, used by rtw_thread_start() below */
#include <sys/time.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/route.h>


#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR rum_debug
#include <dev/usb/usb_debug.h>

#if 1 /* Baron: ported from Linux; all of this is a temporary solution and needs review */
#include <sys/sema.h>
#include <sys/pcpu.h> /* XXX for PCPU_GET */
//	typedef struct	semaphore _sema;
	typedef struct	sema _sema;
//	typedef	spinlock_t	_lock;
	typedef	struct mtx	_lock;
	typedef struct mtx	_mutex;
	typedef struct rtw_timer_list _timer;
	struct list_head {
		struct list_head *next, *prev;
	};
	struct	__queue	{
		struct	list_head	queue;
		_lock	lock;
	};

	typedef struct mbuf	_buffer;

	typedef struct	__queue	_queue;
	typedef struct	list_head	_list;

	typedef	struct	ifnet *	_nic_hdl;

	typedef pid_t		_thread_hdl_;
//	typedef struct thread	_thread_hdl_;
	typedef void		thread_return;
	typedef void		*thread_context;

	typedef void timer_hdl_return;
	typedef void *timer_hdl_context;
	typedef struct work_struct _workitem;
	typedef struct task _tasklet;

#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
/* emulate a modern version */
#define LINUX_VERSION_CODE KERNEL_VERSION(2, 6, 35)

#define WIRELESS_EXT -1
#define HZ hz

//#define IFT_RTW	0xf9 //ifnet allocate type for RTW
#define free_netdev if_free
#define LIST_CONTAINOR(ptr, type, member) \
        ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
#define container_of(p, t, n) ((t *)((char *)(p) - (SIZE_T)(&((t *)0)->n)))

/*lock - spinlock*/
static inline void _rtw_spinlock_init(_lock *plock)
{
	mtx_init(plock, "", NULL, MTX_DEF | MTX_RECURSE);
}

static inline void _rtw_spinlock_free(_lock *plock)
{
	mtx_destroy(plock);
}

static inline void _rtw_spinlock(_lock *plock)
{
	mtx_lock(plock);
}

static inline void _rtw_spinunlock(_lock *plock)
{
	mtx_unlock(plock);
}

#if 0
static inline void _rtw_spinlock_ex(_lock *plock)
{
	mtx_lock(plock);
}

static inline void _rtw_spinunlock_ex(_lock *plock)
{
	mtx_unlock(plock);
}
#endif

__inline static void _rtw_spinlock_irq(_lock *plock, unsigned long *flags)
{
	mtx_lock(plock); /*{local_irq_save((x)); mtx_lock_spin((lock));}*/
}

__inline static void _rtw_spinunlock_irq(_lock *plock, unsigned long *flags)
{
	mtx_unlock(plock);
}

__inline static void _rtw_spinlock_bh(_lock *plock)
{
	mtx_lock(plock); /*{local_irq_save((x)); mtx_lock_spin((lock));}*/
}

__inline static void _rtw_spinunlock_bh(_lock *plock)
{
	mtx_unlock(plock);
}

/*lock - semaphore*/
static inline void _rtw_init_sema(_sema *sema, int init_val)
{
	sema_init(sema, init_val, "rtw_drv");
}

static inline void _rtw_free_sema(_sema *sema)
{
	sema_destroy(sema);
}

static inline void _rtw_up_sema(_sema *sema)
{
	sema_post(sema);
}

static inline u32 _rtw_down_sema(_sema *sema)
{
	sema_wait(sema);
	return _SUCCESS;
}

/*lock - mutex*/
static inline void _rtw_mutex_init(_mutex *pmutex)
{
	mtx_init(pmutex, "", NULL, MTX_DEF | MTX_RECURSE);
}

static inline void _rtw_mutex_free(_mutex *pmutex)
{
	mtx_destroy(pmutex);
}

__inline static void _rtw_mutex_lock_interruptible(_mutex *pmutex)
{
	mtx_lock(pmutex);
}

__inline static void _rtw_mutex_lock(_mutex *pmutex)
{
	mtx_lock(pmutex);
}

__inline static void _rtw_mutex_unlock(_mutex *pmutex)
{
	mtx_unlock(pmutex);
}

static inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf;

	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
	return pbuf;
}

static inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf;

	pbuf = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
	return pbuf;
}

static inline void _rtw_vmfree(void *pbuf, u32 sz)
{
	free(pbuf, M_DEVBUF);
}

static inline void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
	return pbuf;
}

static inline void *_rtw_zmalloc(u32 sz)
{
	return malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
}

static inline void _rtw_mfree(void *pbuf, u32 sz)
{
	free(pbuf, M_DEVBUF);
}

#ifdef CONFIG_USB_HCI
static inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
	return malloc(size, M_USBDEV, M_NOWAIT | M_ZERO);
}

static inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
	free(addr, M_USBDEV);
}
#endif /* CONFIG_USB_HCI */

struct sk_buff *_rtw_skb_alloc(u32 sz);
void _rtw_skb_free(struct sk_buff *skb);
struct sk_buff *skb_clone(const struct sk_buff *skb);	/* forward declaration; prototyped again below */

static inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
	return NULL;
}

static inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
	return skb_clone(skb);
}

static inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
	/* if_input() returns void and consumes the frame; the skb is handed
	 * over as the mbuf the ifnet expects (as in the original call) */
	(*ndev->if_input)(ndev, (struct mbuf *)skb);
	return 0;
}

#ifdef CONFIG_RTW_NAPI
static inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
	rtw_warn_on(1);
	return -1;
}

#ifdef CONFIG_RTW_GRO
static inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	rtw_warn_on(1);
	return -1;
}
#endif /* CONFIG_RTW_GRO */
#endif /* CONFIG_RTW_NAPI */


/*
 * Linux timers are emulated using FreeBSD callout functions
 * (and taskqueue functionality).
 *
 * Currently no timer stats functionality.
 *
 * See (linux_compat) processes.c
 *
 */
struct rtw_timer_list {
	struct callout callout;
	void (*function)(void *);
	void *arg;
};
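
#if 0	/* Usage sketch, not compiled: my_timeout/my_ctx/my_timer are
	 * hypothetical names. It drives the callout-backed emulation through
	 * _init_timer()/_set_timer()/_cancel_timer(), all defined later in
	 * this header. The delay is in ticks, since _set_timer() hands it
	 * straight to callout_reset().
	 */
static int my_ctx;

static void my_timeout(void *ctx)
{
	/* invoked from timer_hdl() once the callout fires */
}

static _timer my_timer;
static u8 cancelled;

_init_timer(&my_timer, my_timeout, &my_ctx);	/* bind handler + context */
_set_timer(&my_timer, 100);			/* arm: fires in ~100 ticks */
_cancel_timer(&my_timer, &cancelled);		/* drain before teardown */
#endif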

struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
/* Values for the state of an item of work (work_struct) */
typedef enum work_state {
        WORK_STATE_UNSET = 0,
        WORK_STATE_CALLOUT_PENDING = 1,
        WORK_STATE_TASK_PENDING = 2,
        WORK_STATE_WORK_CANCELLED = 3
} work_state_t;

struct work_struct {
        struct task task; /* FreeBSD task */
        work_state_t state; /* the pending or otherwise state of work. */
        work_func_t func;
};

//modify private structure to match freebsd
#define BITS_PER_LONG 32
union ktime {
	s64	tv64;
#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
	struct {
#ifdef __BIG_ENDIAN
	s32	sec, nsec;
#else
	s32	nsec, sec;
#endif
	} tv;
#endif
};
#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define CHECKSUM_NONE 0
typedef unsigned char *sk_buff_data_t;
typedef union ktime ktime_t;		/* Kill this */

void rtw_mtx_lock(_lock *plock);

void rtw_mtx_unlock(_lock *plock);

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@ndisc_nodetype: router type (from link layer)
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	//struct net_device	*dev;
	struct ifnet		*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	u16			mac_len,
				hdr_len;
	union {
		u32		csum;
		struct {
			u16	csum_start;
			u16	csum_offset;
		} smbol2;
	} smbol1;
	u32			priority;
	kmemcheck_bitfield_begin(flags1);
	u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	u16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	u32			rxhash;

	kmemcheck_bitfield_begin(flags2);
	u16			queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	u8			ndisc_nodetype:2,
				deliver_no_wcard:1;
#else
	u8			deliver_no_wcard:1;
#endif
	kmemcheck_bitfield_end(flags2);

	/* 0/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	u32			secmark;
#endif
	union {
		u32		mark;
		u32		dropcount;
	} symbol3;

	u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	ATOMIC_T		users;
};

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	u32		qlen;
	_lock	lock;
};
#define skb_tail_pointer(skb)	((skb)->tail)

static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	//SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	if (skb->len < skb->data_len)
		printf("%s(),%d,error!\n", __FUNCTION__, __LINE__);
	return skb->data += len;
}

static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return __skb_pull(skb, len);
}

static inline u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

static inline void skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	mtx_lock(&list->lock);
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
	mtx_unlock(&list->lock);
}

static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;

	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	mtx_lock(&list->lock);
	skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	mtx_unlock(&list->lock);

	return skb;
}

static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	_rtw_spinlock_init(&list->lock);
	__skb_queue_head_init(list);
}
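
#if 0	/* Usage sketch, not compiled: rx_queue/skb are hypothetical.
	 * skb_queue_head_init() must run before the first enqueue, since
	 * skb_queue_tail()/skb_dequeue() take the mtx embedded in the head.
	 */
static struct sk_buff_head rx_queue;
static struct sk_buff *skb;

skb_queue_head_init(&rx_queue);		/* init lock + empty ring */
skb_queue_tail(&rx_queue, skb);		/* locked enqueue at the tail */
skb = skb_dequeue(&rx_queue);		/* locked dequeue; NULL when empty */
#endif
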
static inline u8 *rtw_skb_data(struct sk_buff *pkt)
{
	return pkt->data;
}

static inline u32 rtw_skb_len(struct sk_buff *pkt)
{
	return pkt->len;
}

unsigned long copy_from_user(void *to, const void *from, unsigned long n);
unsigned long copy_to_user(void *to, const void *from, unsigned long n);
struct sk_buff *dev_alloc_skb(unsigned int size);
struct sk_buff *skb_clone(const struct sk_buff *skb);
void dev_kfree_skb_any(struct sk_buff *skb);
#endif /* Baron: ported from Linux; temporary solution, needs review */


#if 1 /* Kenny: Linux compatibility code for the Linux USB driver */
#include <dev/usb/usb_compat_linux.h>

#define __init		// __attribute ((constructor))
#define __exit		// __attribute ((destructor))

/*
 * Definitions for module_init and module_exit macros.
 *
 * These macros will use the SYSINIT framework to call a specified
 * function (with no arguments) on module loading or unloading.
 *
 */

void module_init_exit_wrapper(void *arg);

#define module_init(initfn)                             \
        SYSINIT(mod_init_ ## initfn,                    \
                SI_SUB_KLD, SI_ORDER_FIRST,             \
                module_init_exit_wrapper, initfn)

#define module_exit(exitfn)                             \
        SYSUNINIT(mod_exit_ ## exitfn,                  \
                  SI_SUB_KLD, SI_ORDER_ANY,             \
                  module_init_exit_wrapper, exitfn)
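
#if 0	/* Usage sketch, not compiled: my_drv_init/my_drv_exit are
	 * hypothetical. module_init()/module_exit() hook a function into
	 * SYSINIT/SYSUNINIT so it runs at kldload/kldunload time; the
	 * function is invoked through module_init_exit_wrapper() and takes
	 * no arguments of its own.
	 */
static void my_drv_init(void)
{
	printf("rtw: loaded\n");
}

static void my_drv_exit(void)
{
	printf("rtw: unloaded\n");
}

module_init(my_drv_init);
module_exit(my_drv_exit);
#endif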

/*
 * The usb_register and usb_deregister functions are used to register
 * usb drivers with the usb subsystem.
 */
int usb_register(struct usb_driver *driver);
int usb_deregister(struct usb_driver *driver);
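
#if 0	/* Usage sketch, not compiled: rtw_usb_driver is a hypothetical,
	 * driver-supplied struct usb_driver (from usb_compat_linux.h).
	 * Registration is typically done from the module init hook and
	 * undone from the exit hook.
	 */
static struct usb_driver rtw_usb_driver;

usb_register(&rtw_usb_driver);		/* at load time */
usb_deregister(&rtw_usb_driver);	/* at unload time */
#endif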

/*
 * usb_get_dev and usb_put_dev - increment/decrement the reference count
 * of the usb device structure.
 *
 * Original body of usb_get_dev:
 *
 *       if (dev)
 *               get_device(&dev->dev);
 *       return dev;
 *
 * Reference counts are not currently used in this compatibility
 * layer. So these functions will do nothing.
 */
static inline struct usb_device *
usb_get_dev(struct usb_device *dev)
{
        return dev;
}

static inline void
usb_put_dev(struct usb_device *dev)
{
        return;
}


// rtw_usb_compat_linux
int rtw_usb_submit_urb(struct urb *urb, uint16_t mem_flags);
int rtw_usb_unlink_urb(struct urb *urb);
int rtw_usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe);
int rtw_usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe,
    uint8_t request, uint8_t requesttype,
    uint16_t value, uint16_t index, void *data,
    uint16_t size, usb_timeout_t timeout);
int rtw_usb_set_interface(struct usb_device *dev, uint8_t iface_no, uint8_t alt_index);
int rtw_usb_setup_endpoint(struct usb_device *dev,
    struct usb_host_endpoint *uhe, usb_size_t bufsize);
struct urb *rtw_usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags);
struct usb_host_endpoint *rtw_usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep);
struct usb_host_interface *rtw_usb_altnum_to_altsetting(const struct usb_interface *intf, uint8_t alt_index);
struct usb_interface *rtw_usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no);
void *rtw_usb_get_intfdata(struct usb_interface *intf);
void rtw_usb_linux_register(void *arg);
void rtw_usb_linux_deregister(void *arg);
void rtw_usb_linux_free_device(struct usb_device *dev);
void rtw_usb_free_urb(struct urb *urb);
void rtw_usb_init_urb(struct urb *urb);
void rtw_usb_kill_urb(struct urb *urb);
void rtw_usb_set_intfdata(struct usb_interface *intf, void *data);
void rtw_usb_fill_bulk_urb(struct urb *urb, struct usb_device *udev,
    struct usb_host_endpoint *uhe, void *buf,
    int length, usb_complete_t callback, void *arg);
int rtw_usb_bulk_msg(struct usb_device *udev, struct usb_host_endpoint *uhe,
    void *data, int len, uint16_t *pactlen, usb_timeout_t timeout);
void *usb_get_intfdata(struct usb_interface *intf);
int usb_linux_init_endpoints(struct usb_device *udev);



typedef struct urb *PURB;

typedef unsigned gfp_t;
#define __GFP_WAIT      ((gfp_t)0x10u)  /* Can wait and reschedule? */
#define __GFP_HIGH      ((gfp_t)0x20u)  /* Should access emergency pools? */
#define __GFP_IO        ((gfp_t)0x40u)  /* Can start physical IO? */
#define __GFP_FS        ((gfp_t)0x80u)  /* Can call down to low-level FS? */
#define __GFP_COLD      ((gfp_t)0x100u) /* Cache-cold page required */
#define __GFP_NOWARN    ((gfp_t)0x200u) /* Suppress page allocation failure warning */
#define __GFP_REPEAT    ((gfp_t)0x400u) /* Retry the allocation.  Might fail */
#define __GFP_NOFAIL    ((gfp_t)0x800u) /* Retry for ever.  Cannot fail */
#define __GFP_NORETRY   ((gfp_t)0x1000u)/* Do not retry.  Might fail */
#define __GFP_NO_GROW   ((gfp_t)0x2000u)/* Slab internal usage */
#define __GFP_COMP      ((gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO      ((gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT      (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC      (__GFP_HIGH)
#define GFP_NOIO        (__GFP_WAIT)
#define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
                         __GFP_HIGHMEM)


#endif /* Kenny: Linux compatibility code for the Linux USB driver */

__inline static _list *get_next(_list *list)
{
	return list->next;
}

__inline static _list *get_list_head(_queue *queue)
{
	return (&(queue->queue));
}


#define LIST_CONTAINOR(ptr, type, member) \
        ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))


static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

__inline static void rtw_list_delete(_list *plist)
{
	__list_del(plist->prev, plist->next);
	INIT_LIST_HEAD(plist);
}

static inline void timer_hdl(void *ctx)
{
	_timer *timer = (_timer *)ctx;

	rtw_mtx_lock(NULL);
	if (callout_pending(&timer->callout)) {
		/* callout was reset */
		rtw_mtx_unlock(NULL);
		return;
	}

	if (!callout_active(&timer->callout)) {
		/* callout was stopped */
		rtw_mtx_unlock(NULL);
		return;
	}

	callout_deactivate(&timer->callout);

	timer->function(timer->arg);

	rtw_mtx_unlock(NULL);
}

static inline void _init_timer(_timer *ptimer, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;
	callout_init(&ptimer->callout, CALLOUT_MPSAFE);
}

__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
	if (ptimer->function && ptimer->arg) {
		rtw_mtx_lock(NULL);
		callout_reset(&ptimer->callout, delay_time, timer_hdl, ptimer);
		rtw_mtx_unlock(NULL);
	}
}

__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	rtw_mtx_lock(NULL);
	callout_drain(&ptimer->callout);
	rtw_mtx_unlock(NULL);
	*bcancelled = 1; /* assume a pending timer was cancelled */
}

__inline static void _init_workitem(_workitem *pwork, void *pfunc, PVOID cntx)
{
	printf("%s: not implemented yet!\n", __FUNCTION__);
}

__inline static void _set_workitem(_workitem *pwork)
{
	printf("%s: not implemented yet!\n", __FUNCTION__);
//	schedule_work(pwork);
}

//
// Global Mutex: can only be used at PASSIVE level.
//

#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)                              \
{                                                               \
}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)                              \
{                                                               \
}

/* Atomic integer operations */
#define ATOMIC_T atomic_t

#define ATOMIC_INIT(i)  { (i) }

static inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
	atomic_set_int(v, i);
}

static inline int ATOMIC_READ(ATOMIC_T *v)
{
	return atomic_load_acq_32(v);
}

static inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
	atomic_add_int(v, i);
}

static inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
	atomic_subtract_int(v, i);
}

static inline void ATOMIC_INC(ATOMIC_T *v)
{
	atomic_add_int(v, 1);
}

static inline void ATOMIC_DEC(ATOMIC_T *v)
{
	atomic_subtract_int(v, 1);
}

static inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
	atomic_add_int(v, i);
	return atomic_load_acq_32(v);
}

static inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
	atomic_subtract_int(v, i);
	return atomic_load_acq_32(v);
}

static inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
	atomic_add_int(v, 1);
	return atomic_load_acq_32(v);
}

static inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
	atomic_subtract_int(v, 1);
	return atomic_load_acq_32(v);
}

static inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
	/* a sketch of Linux atomic_inc_unless(): increment *v unless it
	 * equals u; returns true when the increment was performed
	 * (replaces the original "#error TBD" placeholder) */
	int c = atomic_load_acq_32(v);

	while (c != u && !atomic_cmpset_int(v, c, c + 1))
		c = atomic_load_acq_32(v);
	return (c != u);
}

/*task*/
typedef void (*task_fn_t)(void *context, int pending);
#if 0 /*taskqueue -- asynchronous task execution*/

TASK_INIT(struct task *task, int priority, task_fn_t func,
	 void *context);

TASK_INITIALIZER(int priority, task_fn_t func, void *context);

TASKQUEUE_DECLARE(name);

TASKQUEUE_DEFINE(name, taskqueue_enqueue_fn enqueue, void *context,
	 init);

#endif
static inline void rtw_tasklet_init(_tasklet *t, task_fn_t func,
							unsigned long data)
{
	TASK_INIT(t, 0, func, (void *)data);
}

static inline void rtw_tasklet_kill(_tasklet *t)
{

}

static inline void rtw_tasklet_schedule(_tasklet *t)
{

}

static inline void rtw_tasklet_hi_schedule(_tasklet *t)
{

}


/*thread*/
static inline void rtw_thread_enter(char *name)
{
	printf("%s", "RTKTHREAD_enter");
}

static inline void rtw_thread_exit(_completion *comp)
{
	printf("%s", "RTKTHREAD_exit");
}

#include <sys/unistd.h>		/* for RFHIGHPID */
static inline _thread_hdl_ rtw_thread_start(int (*threadfn)(void *data),
			void *data, const char namefmt[])
{
	_thread_hdl_ _rtw_thread = 0;
	struct proc *p;
	struct thread *td;

	/* kproc_kthread_add() returns 0 on success or an errno; the thread
	 * entry point is cast to the void-returning form it expects */
	if (kproc_kthread_add((void (*)(void *))threadfn, data,
			&p, &td, RFHIGHPID, 0, namefmt, namefmt) == 0)
		_rtw_thread = p->p_pid;

	return _rtw_thread;
}

static inline bool rtw_thread_stop(_thread_hdl_ th)
{
	return _FALSE;
}

static inline void rtw_thread_wait_stop(void)
{

}

__inline static void flush_signals_thread(void)
{

}

#define rtw_dump_stack(void) do {} while (0)
#define rtw_bug_on(condition) do {} while (0)
#define rtw_warn_on(condition) do {} while (0)
#define rtw_sprintf(buf, size, format, arg...) do {} while (0)

#define rtw_netdev_priv(netdev) (((struct ifnet *)netdev)->if_softc)
#define rtw_free_netdev(netdev) if_free((netdev))

#define RTW_DIV_ROUND_UP(n, d)	(((n) + (d - 1)) / d)

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ""
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) ""
#define FUNC_NDEV_FMT "%s"
#define FUNC_NDEV_ARG(ndev) __func__
#define FUNC_ADPT_FMT "%s"
#define FUNC_ADPT_ARG(adapter) __func__

#define STRUCT_PACKED

#endif /* __OSDEP_BSD_SERVICE_H_ */
