xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/rtl8189es/os_dep/osdep_service.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17  *
18  *
19  ******************************************************************************/
20 
21 
22 #define _OSDEP_SERVICE_C_
23 
24 #include <drv_types.h>
25 
26 #define RT_TAG	'1178'
27 
28 #ifdef DBG_MEMORY_LEAK
29 #ifdef PLATFORM_LINUX
30 atomic_t _malloc_cnt = ATOMIC_INIT(0);
31 atomic_t _malloc_size = ATOMIC_INIT(0);
32 #endif
33 #endif /* DBG_MEMORY_LEAK */
34 
35 
36 #if defined(PLATFORM_LINUX)
37 /*
38 * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
39 * @return: one of RTW_STATUS_CODE
40 */
41 inline int RTW_STATUS_CODE(int error_code){
42 	if(error_code >=0)
43 		return _SUCCESS;
44 
45 	switch(error_code) {
46 		//case -ETIMEDOUT:
47 		//	return RTW_STATUS_TIMEDOUT;
48 		default:
49 			return _FAIL;
50 	}
51 }
52 #else
53 inline int RTW_STATUS_CODE(int error_code){
54 	return error_code;
55 }
56 #endif
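
/*
 * Usage sketch (editorial addition, not in the original file): on Linux,
 * RTW_STATUS_CODE() folds a kernel-style return value (0/positive = OK,
 * negative errno = error) into the driver's two-valued status space, so
 * callers branch only on _SUCCESS/_FAIL. Names below are hypothetical.
 *
 *	int os_ret = some_kernel_call();	// returns 0 or a negative errno
 *	if (RTW_STATUS_CODE(os_ret) != _SUCCESS)
 *		goto exit;			// any negative code maps to _FAIL
 */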
57 
58 u32 rtw_atoi(u8* s)
59 {
60 
61 	int num=0,flag=0;
62 	int i;
63 	for(i=0;i<=strlen(s);i++)
64 	{
65 	  if(s[i] >= '0' && s[i] <= '9')
66 		 num = num * 10 + s[i] -'0';
67 	  else if(s[0] == '-' && i==0)
68 		 flag =1;
69 	  else
70 		  break;
71 	 }
72 
73 	if(flag == 1)
74 	   num = num * -1;
75 
76 	 return(num);
77 
78 }
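
/*
 * Editorial note: rtw_atoi() accepts an optional leading '-' followed by
 * decimal digits and stops at the first non-digit character; evaluating
 * strlen() in the loop condition is redundant but harmless. Illustrative
 * expectations:
 *
 *	rtw_atoi((u8 *)"123");		// -> 123
 *	rtw_atoi((u8 *)"-45x");		// -> -45, parsing stops at 'x'
 */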
79 
80 inline u8* _rtw_vmalloc(u32 sz)
81 {
82 	u8 	*pbuf;
83 #ifdef PLATFORM_LINUX
84 	pbuf = vmalloc(sz);
85 #endif
86 #ifdef PLATFORM_FREEBSD
87 	pbuf = malloc(sz,M_DEVBUF,M_NOWAIT);
88 #endif
89 
90 #ifdef PLATFORM_WINDOWS
91 	NdisAllocateMemoryWithTag(&pbuf,sz, RT_TAG);
92 #endif
93 
94 #ifdef DBG_MEMORY_LEAK
95 #ifdef PLATFORM_LINUX
96 	if ( pbuf != NULL) {
97 		atomic_inc(&_malloc_cnt);
98 		atomic_add(sz, &_malloc_size);
99 	}
100 #endif
101 #endif /* DBG_MEMORY_LEAK */
102 
103 	return pbuf;
104 }
105 
106 inline u8* _rtw_zvmalloc(u32 sz)
107 {
108 	u8 	*pbuf;
109 #ifdef PLATFORM_LINUX
110 	pbuf = _rtw_vmalloc(sz);
111 	if (pbuf != NULL)
112 		memset(pbuf, 0, sz);
113 #endif
114 #ifdef PLATFORM_FREEBSD
115 	pbuf = malloc(sz,M_DEVBUF,M_ZERO|M_NOWAIT);
116 #endif
117 #ifdef PLATFORM_WINDOWS
118 	NdisAllocateMemoryWithTag(&pbuf,sz, RT_TAG);
119 	if (pbuf != NULL)
120 		NdisFillMemory(pbuf, sz, 0);
121 #endif
122 
123 	return pbuf;
124 }
125 
126 inline void _rtw_vmfree(u8 *pbuf, u32 sz)
127 {
128 #ifdef	PLATFORM_LINUX
129 	vfree(pbuf);
130 #endif
131 #ifdef PLATFORM_FREEBSD
132 	free(pbuf,M_DEVBUF);
133 #endif
134 #ifdef PLATFORM_WINDOWS
135 	NdisFreeMemory(pbuf,sz, 0);
136 #endif
137 
138 #ifdef DBG_MEMORY_LEAK
139 #ifdef PLATFORM_LINUX
140 	atomic_dec(&_malloc_cnt);
141 	atomic_sub(sz, &_malloc_size);
142 #endif
143 #endif /* DBG_MEMORY_LEAK */
144 }
145 
146 u8* _rtw_malloc(u32 sz)
147 {
148 
149 	u8 	*pbuf=NULL;
150 
151 #ifdef PLATFORM_LINUX
152 #ifdef RTK_DMP_PLATFORM
153 	if(sz > 0x4000)
154 		pbuf = (u8 *)dvr_malloc(sz);
155 	else
156 #endif
157 		pbuf = kmalloc(sz,in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
158 
159 #endif
160 #ifdef PLATFORM_FREEBSD
161 	pbuf = malloc(sz,M_DEVBUF,M_NOWAIT);
162 #endif
163 #ifdef PLATFORM_WINDOWS
164 
165 	NdisAllocateMemoryWithTag(&pbuf,sz, RT_TAG);
166 
167 #endif
168 
169 #ifdef DBG_MEMORY_LEAK
170 #ifdef PLATFORM_LINUX
171 	if ( pbuf != NULL) {
172 		atomic_inc(&_malloc_cnt);
173 		atomic_add(sz, &_malloc_size);
174 	}
175 #endif
176 #endif /* DBG_MEMORY_LEAK */
177 
178 	return pbuf;
179 
180 }
181 
182 
183 u8* _rtw_zmalloc(u32 sz)
184 {
185 #ifdef PLATFORM_FREEBSD
186 	return malloc(sz,M_DEVBUF,M_ZERO|M_NOWAIT);
187 #else // PLATFORM_FREEBSD
188 	u8 	*pbuf = _rtw_malloc(sz);
189 
190 	if (pbuf != NULL) {
191 
192 #ifdef PLATFORM_LINUX
193 		memset(pbuf, 0, sz);
194 #endif
195 
196 #ifdef PLATFORM_WINDOWS
197 		NdisFillMemory(pbuf, sz, 0);
198 #endif
199 
200 	}
201 
202 	return pbuf;
203 #endif // PLATFORM_FREEBSD
204 }
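
/*
 * Usage sketch (editorial addition): on Linux the _rtw_*alloc helpers select
 * GFP_ATOMIC in interrupt context and GFP_KERNEL otherwise, so the same call
 * is safe in both paths. Each allocation should be released with the matching
 * free routine using the same size, which keeps the DBG_MEMORY_LEAK counters
 * balanced. The buffer name below is hypothetical.
 *
 *	u8 *buf = _rtw_zmalloc(128);		// zero-initialized, 128 bytes
 *	if (buf) {
 *		// ... use buf ...
 *		_rtw_mfree(buf, 128);		// same size as the allocation
 *	}
 */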
205 
206 void	_rtw_mfree(u8 *pbuf, u32 sz)
207 {
208 
209 #ifdef	PLATFORM_LINUX
210 #ifdef RTK_DMP_PLATFORM
211 	if(sz > 0x4000)
212 		dvr_free(pbuf);
213 	else
214 #endif
215 		kfree(pbuf);
216 
217 #endif
218 #ifdef PLATFORM_FREEBSD
219 	free(pbuf,M_DEVBUF);
220 #endif
221 #ifdef PLATFORM_WINDOWS
222 
223 	NdisFreeMemory(pbuf,sz, 0);
224 
225 #endif
226 
227 #ifdef DBG_MEMORY_LEAK
228 #ifdef PLATFORM_LINUX
229 	atomic_dec(&_malloc_cnt);
230 	atomic_sub(sz, &_malloc_size);
231 #endif
232 #endif /* DBG_MEMORY_LEAK */
233 
234 }
235 
236 #ifdef PLATFORM_FREEBSD
237 //review again
238 struct sk_buff * dev_alloc_skb(unsigned int size)
239 {
240 	struct sk_buff *skb=NULL;
241     	u8 *data=NULL;
242 
243 	//skb = (struct sk_buff *)_rtw_zmalloc(sizeof(struct sk_buff)); // for skb->len, etc.
244 	skb = (struct sk_buff *)_rtw_malloc(sizeof(struct sk_buff));
245 	if(!skb)
246 		goto out;
247 	data = _rtw_malloc(size);
248 	if(!data)
249 		goto nodata;
250 
251 	skb->head = (unsigned char*)data;
252 	skb->data = (unsigned char*)data;
253 	skb->tail = (unsigned char*)data;
254 	skb->end = (unsigned char*)data + size;
255 	skb->len = 0;
256 	//printf("%s()-%d: skb=%p, skb->head = %p\n", __FUNCTION__, __LINE__, skb, skb->head);
257 
258 out:
259 	return skb;
260 nodata:
261 	_rtw_mfree((u8 *)skb, sizeof(struct sk_buff));
262 	skb = NULL;
263 goto out;
264 
265 }
266 
267 void dev_kfree_skb_any(struct sk_buff *skb)
268 {
269 	//printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head);
270 	if(skb->head)
271 		_rtw_mfree(skb->head, 0);
272 	//printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb);
273 	if(skb)
274 		_rtw_mfree((u8 *)skb, 0);
275 }
276 struct sk_buff *skb_clone(const struct sk_buff *skb)
277 {
278 	return NULL;
279 }
280 
281 #endif /* PLATFORM_FREEBSD */
282 
283 inline struct sk_buff *_rtw_skb_alloc(u32 sz)
284 {
285 #ifdef PLATFORM_LINUX
286 	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
287 #endif /* PLATFORM_LINUX */
288 
289 #ifdef PLATFORM_FREEBSD
290 	return dev_alloc_skb(sz);
291 #endif /* PLATFORM_FREEBSD */
292 }
293 
294 inline void _rtw_skb_free(struct sk_buff *skb)
295 {
296 	dev_kfree_skb_any(skb);
297 }
298 
299 inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
300 {
301 #ifdef PLATFORM_LINUX
302 	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
303 #endif /* PLATFORM_LINUX */
304 
305 #ifdef PLATFORM_FREEBSD
306 	return NULL;
307 #endif /* PLATFORM_FREEBSD */
308 }
309 
310 inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
311 {
312 #ifdef PLATFORM_LINUX
313 	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
314 #endif /* PLATFORM_LINUX */
315 
316 #ifdef PLATFORM_FREEBSD
317 	return skb_clone(skb);
318 #endif /* PLATFORM_FREEBSD */
319 }
320 
321 inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
322 {
323 #ifdef PLATFORM_LINUX
324 	skb->dev = ndev;
325 		return netif_rx(skb);
326 #endif /* PLATFORM_LINUX */
327 
328 #ifdef PLATFORM_FREEBSD
329 	return (*ndev->if_input)(ndev, skb);
330 #endif /* PLATFORM_FREEBSD */
331 }
332 
333 void _rtw_skb_queue_purge(struct sk_buff_head *list)
334 {
335 	struct sk_buff *skb;
336 
337 	while ((skb = skb_dequeue(list)) != NULL)
338 		_rtw_skb_free(skb);
339 }
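
/*
 * Usage sketch (editorial addition): the _rtw_skb_* wrappers mirror the Linux
 * skb API while choosing GFP_ATOMIC/GFP_KERNEL automatically. A typical RX
 * hand-off, with hypothetical names (padapter, pkt_len):
 *
 *	struct sk_buff *skb = _rtw_skb_alloc(pkt_len);
 *	if (skb) {
 *		// ... copy the received frame into skb->data ...
 *		_rtw_netif_rx(padapter->pnetdev, skb);	// netif_rx() consumes the skb
 *	}
 */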
340 
341 #ifdef CONFIG_USB_HCI
342 inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
343 {
344 #ifdef PLATFORM_LINUX
345 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
346 	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
347 #else
348 	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
349 #endif
350 #endif /* PLATFORM_LINUX */
351 
352 #ifdef PLATFORM_FREEBSD
353 	return (malloc(size, M_USBDEV, M_NOWAIT | M_ZERO));
354 #endif /* PLATFORM_FREEBSD */
355 }
356 inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
357 {
358 #ifdef PLATFORM_LINUX
359 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
360 	usb_free_coherent(dev, size, addr, dma);
361 #else
362 	usb_buffer_free(dev, size, addr, dma);
363 #endif
364 #endif /* PLATFORM_LINUX */
365 
366 #ifdef PLATFORM_FREEBSD
367 	free(addr, M_USBDEV);
368 #endif /* PLATFORM_FREEBSD */
369 }
370 #endif /* CONFIG_USB_HCI */
371 
372 #if defined(DBG_MEM_ALLOC)
373 
374 struct rtw_mem_stat {
375 	ATOMIC_T alloc; // bytes currently allocated
376 	ATOMIC_T peak; // peak number of bytes allocated
377 	ATOMIC_T alloc_cnt; // number of allocations currently outstanding
378 	ATOMIC_T alloc_err_cnt; // number of failed allocation attempts
379 };
380 
381 struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
382 #ifdef RTW_MEM_FUNC_STAT
383 struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
384 #endif
385 
386 char *MSTAT_TYPE_str[] = {
387 	"VIR",
388 	"PHY",
389 	"SKB",
390 	"USB",
391 };
392 
393 #ifdef RTW_MEM_FUNC_STAT
394 char *MSTAT_FUNC_str[] = {
395 	"UNSP",
396 	"IO",
397 	"TXIO",
398 	"RXIO",
399 	"TX",
400 	"RX",
401 };
402 #endif
403 
404 void rtw_mstat_dump(void *sel)
405 {
406 	int i;
407 	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
408 #ifdef RTW_MEM_FUNC_STAT
409 	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
410 #endif
411 
412 	int vir_alloc, vir_peak, vir_alloc_err, phy_alloc, phy_peak, phy_alloc_err;
413 	int tx_alloc, tx_peak, tx_alloc_err, rx_alloc, rx_peak, rx_alloc_err;
414 
415 	for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
416 		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
417 		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
418 		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
419 		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
420 	}
421 
422 	#ifdef RTW_MEM_FUNC_STAT
423 	for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
424 		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
425 		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
426 		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
427 		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
428 	}
429 	#endif
430 
431 	DBG_871X_SEL_NL(sel, "===================== MSTAT =====================\n");
432 	DBG_871X_SEL_NL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "alloc_cnt", "err_cnt");
433 	DBG_871X_SEL_NL(sel, "-------------------------------------------------\n");
434 	for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
435 		DBG_871X_SEL_NL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
436 	}
437 	#ifdef RTW_MEM_FUNC_STAT
438 	DBG_871X_SEL_NL(sel, "-------------------------------------------------\n");
439 	for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
440 		DBG_871X_SEL_NL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
441 	}
442 	#endif
443 }
444 
445 void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
446 {
447 	static u32 update_time = 0;
448 	int peak, alloc;
449 	int i;
450 
451 	/* initialization */
452 	if(!update_time) {
453 		for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
454 			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
455 			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
456 			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
457 			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
458 		}
459 		#ifdef RTW_MEM_FUNC_STAT
460 		for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
461 			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
462 			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
463 			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
464 			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
465 		}
466 		#endif
467 	}
468 
469 	switch(status) {
470 		case MSTAT_ALLOC_SUCCESS:
471 			ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
472 			alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
473 			peak=ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
474 			if (peak<alloc)
475 				ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);
476 
477 			#ifdef RTW_MEM_FUNC_STAT
478 			ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
479 			alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
480 			peak=ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
481 			if (peak<alloc)
482 				ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
483 			#endif
484 			break;
485 
486 		case MSTAT_ALLOC_FAIL:
487 			ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
488 			#ifdef RTW_MEM_FUNC_STAT
489 			ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
490 			#endif
491 			break;
492 
493 		case MSTAT_FREE:
494 			ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
495 			ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
496 			#ifdef RTW_MEM_FUNC_STAT
497 			ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
498 			ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
499 			#endif
500 			break;
501 	};
502 
503 	//if (rtw_get_passing_time_ms(update_time) > 5000) {
504 	//	rtw_mstat_dump(RTW_DBGDUMP);
505 		update_time=rtw_get_current_time();
506 	//}
507 }
508 
509 #ifndef SIZE_MAX
510 	#define SIZE_MAX (~(size_t)0)
511 #endif
512 
513 struct mstat_sniff_rule {
514 	enum mstat_f flags;
515 	size_t lb;
516 	size_t hb;
517 };
518 
519 struct mstat_sniff_rule mstat_sniff_rules[] = {
520 	{MSTAT_TYPE_PHY, 4097, SIZE_MAX},
521 };
522 
523 int mstat_sniff_rule_num = sizeof(mstat_sniff_rules)/sizeof(struct mstat_sniff_rule);
524 
525 bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
526 {
527 	int i;
528 	for (i = 0; i<mstat_sniff_rule_num; i++) {
529 		if (mstat_sniff_rules[i].flags == flags
530 				&& mstat_sniff_rules[i].lb <= size
531 				&& mstat_sniff_rules[i].hb >= size)
532 			return _TRUE;
533 	}
534 
535 	return _FALSE;
536 }
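
/*
 * Example (editorial addition): sniff rules match on exact flag equality and
 * an inclusive [lb, hb] size range. Assuming the header also defines
 * MSTAT_TYPE_SKB alongside MSTAT_TYPE_PHY, logging every SKB allocation
 * between 2 KiB and 4 KiB would only require one more entry above:
 *
 *	// {MSTAT_TYPE_SKB, 2048, 4096},
 */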
537 
538 inline u8* dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
539 {
540 	u8  *p;
541 
542 	if (match_mstat_sniff_rules(flags, sz))
543 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
544 
545 	p=_rtw_vmalloc((sz));
546 
547 	rtw_mstat_update(
548 		flags
549 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
550 		, sz
551 	);
552 
553 	return p;
554 }
555 
556 inline u8* dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
557 {
558 	u8 *p;
559 
560 	if (match_mstat_sniff_rules(flags, sz))
561 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
562 
563 	p=_rtw_zvmalloc((sz));
564 
565 	rtw_mstat_update(
566 		flags
567 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
568 		, sz
569 	);
570 
571 	return p;
572 }
573 
574 inline void dbg_rtw_vmfree(u8 *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
575 {
576 
577 	if (match_mstat_sniff_rules(flags, sz))
578 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
579 
580 	_rtw_vmfree((pbuf), (sz));
581 
582 	rtw_mstat_update(
583 		flags
584 		, MSTAT_FREE
585 		, sz
586 	);
587 }
588 
589 inline u8* dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
590 {
591 	u8 *p;
592 
593 	if (match_mstat_sniff_rules(flags, sz))
594 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
595 
596 	p=_rtw_malloc((sz));
597 
598 	rtw_mstat_update(
599 		flags
600 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
601 		, sz
602 	);
603 
604 	return p;
605 }
606 
607 inline u8* dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
608 {
609 	u8 *p;
610 
611 	if (match_mstat_sniff_rules(flags, sz))
612 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
613 
614 	p = _rtw_zmalloc((sz));
615 
616 	rtw_mstat_update(
617 		flags
618 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
619 		, sz
620 	);
621 
622 	return p;
623 }
624 
625 inline void dbg_rtw_mfree(u8 *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
626 {
627 	if (match_mstat_sniff_rules(flags, sz))
628 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
629 
630 	_rtw_mfree((pbuf), (sz));
631 
632 	rtw_mstat_update(
633 		flags
634 		, MSTAT_FREE
635 		, sz
636 	);
637 }
638 
639 inline struct sk_buff * dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
640 {
641 	struct sk_buff *skb;
642 	unsigned int truesize = 0;
643 
644 	skb = _rtw_skb_alloc(size);
645 
646 	if(skb)
647 		truesize = skb->truesize;
648 
649 	if(!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
650 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);
651 
652 	rtw_mstat_update(
653 		flags
654 		, skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
655 		, truesize
656 	);
657 
658 	return skb;
659 }
660 
661 inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
662 {
663 	unsigned int truesize = skb->truesize;
664 
665 	if(match_mstat_sniff_rules(flags, truesize))
666 		DBG_871X("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
667 
668 	_rtw_skb_free(skb);
669 
670 	rtw_mstat_update(
671 		flags
672 		, MSTAT_FREE
673 		, truesize
674 	);
675 }
676 
677 inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
678 {
679 	struct sk_buff *skb_cp;
680 	unsigned int truesize = skb->truesize;
681 	unsigned int cp_truesize = 0;
682 
683 	skb_cp = _rtw_skb_copy(skb);
684 	if(skb_cp)
685 		cp_truesize = skb_cp->truesize;
686 
687 	if(!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
688 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);
689 
690 	rtw_mstat_update(
691 		flags
692 		, skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
693 		, truesize
694 	);
695 
696 	return skb_cp;
697 }
698 
699 inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
700 {
701 	struct sk_buff *skb_cl;
702 	unsigned int truesize = skb->truesize;
703 	unsigned int cl_truesize = 0;
704 
705 	skb_cl = _rtw_skb_clone(skb);
706 	if(skb_cl)
707 		cl_truesize = skb_cl->truesize;
708 
709 	if(!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
710 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);
711 
712 	rtw_mstat_update(
713 		flags
714 		, skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
715 		, truesize
716 	);
717 
718 	return skb_cl;
719 }
720 
721 inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
722 {
723 	int ret;
724 	unsigned int truesize = skb->truesize;
725 
726 	if(match_mstat_sniff_rules(flags, truesize))
727 		DBG_871X("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
728 
729 	ret = _rtw_netif_rx(ndev, skb);
730 
731 	rtw_mstat_update(
732 		flags
733 		, MSTAT_FREE
734 		, truesize
735 	);
736 
737 	return ret;
738 }
739 
740 inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
741 {
742 	struct sk_buff *skb;
743 
744 	while ((skb = skb_dequeue(list)) != NULL)
745 		dbg_rtw_skb_free(skb, flags, func, line);
746 }
747 
748 #ifdef CONFIG_USB_HCI
749 inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
750 {
751 	void *p;
752 
753 	if(match_mstat_sniff_rules(flags, size))
754 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);
755 
756 	p = _rtw_usb_buffer_alloc(dev, size, dma);
757 
758 	rtw_mstat_update(
759 		flags
760 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
761 		, size
762 	);
763 
764 	return p;
765 }
766 
767 inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
768 {
769 
770 	if(match_mstat_sniff_rules(flags, size))
771 		DBG_871X("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);
772 
773 	_rtw_usb_buffer_free(dev, size, addr, dma);
774 
775 	rtw_mstat_update(
776 		flags
777 		, MSTAT_FREE
778 		, size
779 	);
780 }
781 #endif /* CONFIG_USB_HCI */
782 
783 #endif /* defined(DBG_MEM_ALLOC) */
784 
785 void* rtw_malloc2d(int h, int w, size_t size)
786 {
787 	int j;
788 
789 	void **a = (void **) rtw_zmalloc( h*sizeof(void *) + h*w*size );
790 	if(a == NULL)
791 	{
792 		DBG_871X("%s: alloc memory fail!\n", __FUNCTION__);
793 		return NULL;
794 	}
795 
796 	for( j=0; j<h; j++ )
797 		a[j] = ((char *)(a+h)) + j*w*size;
798 
799 	return a;
800 }
801 
802 void rtw_mfree2d(void *pbuf, int h, int w, int size)
803 {
804 	rtw_mfree((u8 *)pbuf, h*sizeof(void*) + w*h*size);
805 }
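
/*
 * Usage sketch (editorial addition): rtw_malloc2d() places h row pointers in
 * front of an h*w*size data area inside a single allocation, so the result
 * indexes like an ordinary 2-D array and is released with one rtw_mfree2d()
 * call. Hypothetical 8x16 byte table:
 *
 *	u8 **tbl = (u8 **)rtw_malloc2d(8, 16, sizeof(u8));
 *	if (tbl) {
 *		tbl[3][5] = 0xff;
 *		rtw_mfree2d(tbl, 8, 16, sizeof(u8));
 *	}
 */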
806 
807 void _rtw_memcpy(void *dst, const void *src, u32 sz)
808 {
809 
810 #if defined (PLATFORM_LINUX)|| defined (PLATFORM_FREEBSD)
811 
812 	memcpy(dst, src, sz);
813 
814 #endif
815 
816 #ifdef PLATFORM_WINDOWS
817 
818 	NdisMoveMemory(dst, src, sz);
819 
820 #endif
821 
822 }
823 
824 int	_rtw_memcmp(void *dst, void *src, u32 sz)
825 {
826 
827 #if defined (PLATFORM_LINUX)|| defined (PLATFORM_FREEBSD)
828 //under Linux/GNU/glibc, memcmp() returns 0 when the two memory chunks are identical
829 
830 	if (!(memcmp(dst, src, sz)))
831 		return _TRUE;
832 	else
833 		return _FALSE;
834 #endif
835 
836 
837 #ifdef PLATFORM_WINDOWS
838 //under Windows, NdisEqualMemory() returns 1 when the two memory chunks are identical
839 
840 	if (NdisEqualMemory (dst, src, sz))
841 		return _TRUE;
842 	else
843 		return _FALSE;
844 
845 #endif
846 
847 
848 
849 }
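
/*
 * Editorial note: unlike memcmp(), _rtw_memcmp() returns _TRUE when the two
 * buffers are equal and _FALSE otherwise, so callers compare against _TRUE
 * rather than 0. Hypothetical example (mac_a/mac_b are u8 arrays):
 *
 *	if (_rtw_memcmp(mac_a, mac_b, ETH_ALEN) == _TRUE)
 *		;	// the two MAC addresses are identical
 */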
850 
851 void _rtw_memset(void *pbuf, int c, u32 sz)
852 {
853 
854 #if defined (PLATFORM_LINUX)|| defined (PLATFORM_FREEBSD)
855 
856         memset(pbuf, c, sz);
857 
858 #endif
859 
860 #ifdef PLATFORM_WINDOWS
861 #if 0
862 	NdisZeroMemory(pbuf, sz);
863 	if (c != 0) memset(pbuf, c, sz);
864 #else
865 	NdisFillMemory(pbuf, sz, c);
866 #endif
867 #endif
868 
869 }
870 
871 #ifdef PLATFORM_FREEBSD
872 static inline void __list_add(_list *pnew, _list *pprev, _list *pnext)
873  {
874          pnext->prev = pnew;
875          pnew->next = pnext;
876          pnew->prev = pprev;
877          pprev->next = pnew;
878 }
879 #endif /* PLATFORM_FREEBSD */
880 
881 
882 void _rtw_init_listhead(_list *list)
883 {
884 
885 #ifdef PLATFORM_LINUX
886 
887         INIT_LIST_HEAD(list);
888 
889 #endif
890 
891 #ifdef PLATFORM_FREEBSD
892          list->next = list;
893          list->prev = list;
894 #endif
895 #ifdef PLATFORM_WINDOWS
896 
897         NdisInitializeListHead(list);
898 
899 #endif
900 
901 }
902 
903 
904 /*
905 For the following list_xxx operations,
906 caller must guarantee the atomic context.
907 Otherwise, there will be a race condition.
908 */
909 u32	rtw_is_list_empty(_list *phead)
910 {
911 
912 #ifdef PLATFORM_LINUX
913 
914 	if (list_empty(phead))
915 		return _TRUE;
916 	else
917 		return _FALSE;
918 
919 #endif
920 #ifdef PLATFORM_FREEBSD
921 
922 	if (phead->next == phead)
923 		return _TRUE;
924 	else
925 		return _FALSE;
926 
927 #endif
928 
929 
930 #ifdef PLATFORM_WINDOWS
931 
932 	if (IsListEmpty(phead))
933 		return _TRUE;
934 	else
935 		return _FALSE;
936 
937 #endif
938 
939 
940 }
941 
942 void rtw_list_insert_head(_list *plist, _list *phead)
943 {
944 
945 #ifdef PLATFORM_LINUX
946 	list_add(plist, phead);
947 #endif
948 
949 #ifdef PLATFORM_FREEBSD
950 	__list_add(plist, phead, phead->next);
951 #endif
952 
953 #ifdef PLATFORM_WINDOWS
954 	InsertHeadList(phead, plist);
955 #endif
956 }
957 
958 void rtw_list_insert_tail(_list *plist, _list *phead)
959 {
960 
961 #ifdef PLATFORM_LINUX
962 
963 	list_add_tail(plist, phead);
964 
965 #endif
966 #ifdef PLATFORM_FREEBSD
967 
968 	__list_add(plist, phead->prev, phead);
969 
970 #endif
971 #ifdef PLATFORM_WINDOWS
972 
973   InsertTailList(phead, plist);
974 
975 #endif
976 
977 }
978 
979 void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc)
980 {
981 	_adapter *adapter = (_adapter *)padapter;
982 
983 #ifdef PLATFORM_LINUX
984 	_init_timer(ptimer, adapter->pnetdev, pfunc, adapter);
985 #endif
986 #ifdef PLATFORM_FREEBSD
987 	_init_timer(ptimer, adapter->pifp, pfunc, adapter->mlmepriv.nic_hdl);
988 #endif
989 #ifdef PLATFORM_WINDOWS
990 	_init_timer(ptimer, adapter->hndis_adapter, pfunc, adapter->mlmepriv.nic_hdl);
991 #endif
992 }
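
/*
 * Usage sketch (editorial addition, assuming the usual _init_timer()/_set_timer()
 * semantics from the osdep headers, where the adapter is handed back to the
 * callback as its context). The timer and handler names are hypothetical.
 *
 *	rtw_init_timer(&my_timer, padapter, my_timeout_handler);
 *	_set_timer(&my_timer, 1000);	// fire my_timeout_handler(padapter) in ~1 s
 */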
993 
994 /*
995 
996 Caller must check if the list is empty before calling rtw_list_delete
997 
998 */
999 
1000 
1001 void _rtw_init_sema(_sema	*sema, int init_val)
1002 {
1003 
1004 #ifdef PLATFORM_LINUX
1005 
1006 	sema_init(sema, init_val);
1007 
1008 #endif
1009 #ifdef PLATFORM_FREEBSD
1010 	sema_init(sema, init_val, "rtw_drv");
1011 #endif
1012 #ifdef PLATFORM_OS_XP
1013 
1014 	KeInitializeSemaphore(sema, init_val,  SEMA_UPBND); // count=0;
1015 
1016 #endif
1017 
1018 #ifdef PLATFORM_OS_CE
1019 	if(*sema == NULL)
1020 		*sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL);
1021 #endif
1022 
1023 }
1024 
1025 void _rtw_free_sema(_sema	*sema)
1026 {
1027 #ifdef PLATFORM_FREEBSD
1028 	sema_destroy(sema);
1029 #endif
1030 #ifdef PLATFORM_OS_CE
1031 	CloseHandle(*sema);
1032 #endif
1033 
1034 }
1035 
1036 void _rtw_up_sema(_sema	*sema)
1037 {
1038 
1039 #ifdef PLATFORM_LINUX
1040 
1041 	up(sema);
1042 
1043 #endif
1044 #ifdef PLATFORM_FREEBSD
1045 	sema_post(sema);
1046 #endif
1047 #ifdef PLATFORM_OS_XP
1048 
1049 	KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1,  FALSE );
1050 
1051 #endif
1052 
1053 #ifdef PLATFORM_OS_CE
1054 	ReleaseSemaphore(*sema,  1,  NULL );
1055 #endif
1056 }
1057 
1058 u32 _rtw_down_sema(_sema *sema)
1059 {
1060 
1061 #ifdef PLATFORM_LINUX
1062 
1063 	if (down_interruptible(sema))
1064 		return _FAIL;
1065 	else
1066 		return _SUCCESS;
1067 
1068 #endif
1069 #ifdef PLATFORM_FREEBSD
1070 	sema_wait(sema);
1071 	return  _SUCCESS;
1072 #endif
1073 #ifdef PLATFORM_OS_XP
1074 
1075 	if(STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL))
1076 		return  _SUCCESS;
1077 	else
1078 		return _FAIL;
1079 #endif
1080 
1081 #ifdef PLATFORM_OS_CE
1082 	if(WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE ))
1083 		return _SUCCESS;
1084 	else
1085 		return _FAIL;
1086 #endif
1087 }
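
/*
 * Usage sketch (editorial addition): the semaphore wrappers implement a simple
 * producer/consumer hand-off. On Linux, _rtw_down_sema() returns _FAIL when the
 * sleep is interrupted by a signal, so the result should be checked. The
 * semaphore name is hypothetical.
 *
 *	_sema cmd_sema;
 *	_rtw_init_sema(&cmd_sema, 0);		// starts empty
 *	_rtw_up_sema(&cmd_sema);		// producer: work is available
 *	if (_rtw_down_sema(&cmd_sema) == _FAIL)
 *		return;				// consumer: interrupted, bail out
 */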
1088 
1089 inline void thread_exit(_completion *comp)
1090 {
1091 #ifdef PLATFORM_LINUX
1092 	complete_and_exit(comp, 0);
1093 #endif
1094 
1095 #ifdef PLATFORM_FREEBSD
1096 	printf("%s", "RTKTHREAD_exit");
1097 #endif
1098 
1099 #ifdef PLATFORM_OS_CE
1100 	ExitThread(STATUS_SUCCESS);
1101 #endif
1102 
1103 #ifdef PLATFORM_OS_XP
1104 	PsTerminateSystemThread(STATUS_SUCCESS);
1105 #endif
1106 }
1107 
1108 inline void _rtw_init_completion(_completion *comp)
1109 {
1110 #ifdef PLATFORM_LINUX
1111 	init_completion(comp);
1112 #endif
1113 }
1114 inline void _rtw_wait_for_comp_timeout(_completion *comp)
1115 {
1116 #ifdef PLATFORM_LINUX
1117 	wait_for_completion_timeout(comp, msecs_to_jiffies(3000));
1118 #endif
1119 }
1120 inline void _rtw_wait_for_comp(_completion *comp)
1121 {
1122 #ifdef PLATFORM_LINUX
1123 	wait_for_completion(comp);
1124 #endif
1125 }
1126 
1127 inline bool rtw_thread_stop(_thread_hdl_ th)
1128 {
1129 #ifdef PLATFORM_LINUX
1130 	return kthread_stop(th);
1131 #endif
1132 }
1133 
1134 inline bool rtw_thread_should_stop(void)
1135 {
1136 #ifdef PLATFORM_LINUX
1137 	return kthread_should_stop();
1138 #endif
1139 }
1140 
1141 void	_rtw_mutex_init(_mutex *pmutex)
1142 {
1143 #ifdef PLATFORM_LINUX
1144 
1145 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
1146 	mutex_init(pmutex);
1147 #else
1148 	init_MUTEX(pmutex);
1149 #endif
1150 
1151 #endif
1152 #ifdef PLATFORM_FREEBSD
1153 	mtx_init(pmutex, "", NULL, MTX_DEF|MTX_RECURSE);
1154 #endif
1155 #ifdef PLATFORM_OS_XP
1156 
1157 	KeInitializeMutex(pmutex, 0);
1158 
1159 #endif
1160 
1161 #ifdef PLATFORM_OS_CE
1162 	*pmutex =  CreateMutex( NULL, _FALSE, NULL);
1163 #endif
1164 }
1165 
1166 void	_rtw_mutex_free(_mutex *pmutex);
1167 void	_rtw_mutex_free(_mutex *pmutex)
1168 {
1169 #ifdef PLATFORM_LINUX
1170 
1171 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
1172 	mutex_destroy(pmutex);
1173 #else
1174 #endif
1175 
1176 #ifdef PLATFORM_FREEBSD
1177 	sema_destroy(pmutex);
1178 #endif
1179 
1180 #endif
1181 
1182 #ifdef PLATFORM_OS_XP
1183 
1184 #endif
1185 
1186 #ifdef PLATFORM_OS_CE
1187 
1188 #endif
1189 }
1190 
1191 void	_rtw_spinlock_init(_lock *plock)
1192 {
1193 
1194 #ifdef PLATFORM_LINUX
1195 
1196 	spin_lock_init(plock);
1197 
1198 #endif
1199 #ifdef PLATFORM_FREEBSD
1200 		mtx_init(plock, "", NULL, MTX_DEF|MTX_RECURSE);
1201 #endif
1202 #ifdef PLATFORM_WINDOWS
1203 
1204 	NdisAllocateSpinLock(plock);
1205 
1206 #endif
1207 
1208 }
1209 
1210 void	_rtw_spinlock_free(_lock *plock)
1211 {
1212 #ifdef PLATFORM_FREEBSD
1213 	 mtx_destroy(plock);
1214 #endif
1215 
1216 #ifdef PLATFORM_WINDOWS
1217 
1218 	NdisFreeSpinLock(plock);
1219 
1220 #endif
1221 
1222 }
1223 #ifdef PLATFORM_FREEBSD
1224 extern PADAPTER prtw_lock;
1225 
1226 void rtw_mtx_lock(_lock *plock){
1227 	if(prtw_lock){
1228 		mtx_lock(&prtw_lock->glock);
1229 	}
1230 	else{
1231 		printf("%s prtw_lock==NULL",__FUNCTION__);
1232 	}
1233 }
1234 void rtw_mtx_unlock(_lock *plock){
1235 	if(prtw_lock){
1236 		mtx_unlock(&prtw_lock->glock);
1237 	}
1238 	else{
1239 		printf("%s prtw_lock==NULL",__FUNCTION__);
1240 	}
1241 
1242 }
1243 #endif //PLATFORM_FREEBSD
1244 
1245 
1246 void	_rtw_spinlock(_lock	*plock)
1247 {
1248 
1249 #ifdef PLATFORM_LINUX
1250 
1251 	spin_lock(plock);
1252 
1253 #endif
1254 #ifdef PLATFORM_FREEBSD
1255 	mtx_lock(plock);
1256 #endif
1257 #ifdef PLATFORM_WINDOWS
1258 
1259 	NdisAcquireSpinLock(plock);
1260 
1261 #endif
1262 
1263 }
1264 
1265 void	_rtw_spinunlock(_lock *plock)
1266 {
1267 
1268 #ifdef PLATFORM_LINUX
1269 
1270 	spin_unlock(plock);
1271 
1272 #endif
1273 #ifdef PLATFORM_FREEBSD
1274 	mtx_unlock(plock);
1275 #endif
1276 #ifdef PLATFORM_WINDOWS
1277 
1278 	NdisReleaseSpinLock(plock);
1279 
1280 #endif
1281 }
1282 
1283 
1284 void	_rtw_spinlock_ex(_lock	*plock)
1285 {
1286 
1287 #ifdef PLATFORM_LINUX
1288 
1289 	spin_lock(plock);
1290 
1291 #endif
1292 #ifdef PLATFORM_FREEBSD
1293 	mtx_lock(plock);
1294 #endif
1295 #ifdef PLATFORM_WINDOWS
1296 
1297 	NdisDprAcquireSpinLock(plock);
1298 
1299 #endif
1300 
1301 }
1302 
1303 void	_rtw_spinunlock_ex(_lock *plock)
1304 {
1305 
1306 #ifdef PLATFORM_LINUX
1307 
1308 	spin_unlock(plock);
1309 
1310 #endif
1311 #ifdef PLATFORM_FREEBSD
1312 	mtx_unlock(plock);
1313 #endif
1314 #ifdef PLATFORM_WINDOWS
1315 
1316 	NdisDprReleaseSpinLock(plock);
1317 
1318 #endif
1319 }
1320 
1321 
1322 
1323 void	_rtw_init_queue(_queue	*pqueue)
1324 {
1325 
1326 	_rtw_init_listhead(&(pqueue->queue));
1327 
1328 	_rtw_spinlock_init(&(pqueue->lock));
1329 
1330 }
1331 
1332 u32	  _rtw_queue_empty(_queue	*pqueue)
1333 {
1334 	return (rtw_is_list_empty(&(pqueue->queue)));
1335 }
1336 
1337 
1338 u32 rtw_end_of_queue_search(_list *head, _list *plist)
1339 {
1340 	if (head == plist)
1341 		return _TRUE;
1342 	else
1343 		return _FALSE;
1344 }
1345 
1346 
1347 u32	rtw_get_current_time(void)
1348 {
1349 
1350 #ifdef PLATFORM_LINUX
1351 	return jiffies;
1352 #endif
1353 #ifdef PLATFORM_FREEBSD
1354 	struct timeval tvp;
1355 	getmicrotime(&tvp);
1356 	return tvp.tv_sec;
1357 #endif
1358 #ifdef PLATFORM_WINDOWS
1359 	LARGE_INTEGER	SystemTime;
1360 	NdisGetCurrentSystemTime(&SystemTime);
1361 	return (u32)(SystemTime.LowPart);// count of 100-nanosecond intervals
1362 #endif
1363 }
1364 
1365 inline u32 rtw_systime_to_ms(u32 systime)
1366 {
1367 #ifdef PLATFORM_LINUX
1368 	return systime * 1000 / HZ;
1369 #endif
1370 #ifdef PLATFORM_FREEBSD
1371 	return systime * 1000;
1372 #endif
1373 #ifdef PLATFORM_WINDOWS
1374 	return systime / 10000 ;
1375 #endif
1376 }
1377 
1378 inline u32 rtw_ms_to_systime(u32 ms)
1379 {
1380 #ifdef PLATFORM_LINUX
1381 	return ms * HZ / 1000;
1382 #endif
1383 #ifdef PLATFORM_FREEBSD
1384 	return ms /1000;
1385 #endif
1386 #ifdef PLATFORM_WINDOWS
1387 	return ms * 10000 ;
1388 #endif
1389 }
1390 
1391 // the input parameter start uses the same unit as returned by rtw_get_current_time
1392 inline s32 rtw_get_passing_time_ms(u32 start)
1393 {
1394 #ifdef PLATFORM_LINUX
1395 	return rtw_systime_to_ms(jiffies-start);
1396 #endif
1397 #ifdef PLATFORM_FREEBSD
1398 	return rtw_systime_to_ms(rtw_get_current_time());
1399 #endif
1400 #ifdef PLATFORM_WINDOWS
1401 	LARGE_INTEGER	SystemTime;
1402 	NdisGetCurrentSystemTime(&SystemTime);
1403 	return rtw_systime_to_ms((u32)(SystemTime.LowPart) - start) ;
1404 #endif
1405 }
1406 
1407 inline s32 rtw_get_time_interval_ms(u32 start, u32 end)
1408 {
1409 #ifdef PLATFORM_LINUX
1410 	return rtw_systime_to_ms(end-start);
1411 #endif
1412 #ifdef PLATFORM_FREEBSD
1413 	return rtw_systime_to_ms(rtw_get_current_time());
1414 #endif
1415 #ifdef PLATFORM_WINDOWS
1416 	return rtw_systime_to_ms(end-start);
1417 #endif
1418 }
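
/*
 * Usage sketch (editorial addition): on Linux these helpers are thin wrappers
 * around jiffies, so a bounded polling loop is typically written as below
 * (condition_met() is hypothetical):
 *
 *	u32 start = rtw_get_current_time();
 *	while (!condition_met()) {
 *		if (rtw_get_passing_time_ms(start) > 2000)	// give up after ~2 s
 *			break;
 *		rtw_msleep_os(10);
 *	}
 */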
1419 
1420 
1421 void rtw_sleep_schedulable(int ms)
1422 {
1423 
1424 #ifdef PLATFORM_LINUX
1425 
1426     u32 delta;
1427 
1428     delta = (ms * HZ)/1000;//(ms)
1429     if (delta == 0) {
1430         delta = 1;// 1 ms
1431     }
1432     set_current_state(TASK_INTERRUPTIBLE);
1433     if (schedule_timeout(delta) != 0) {
1434         return ;
1435     }
1436     return;
1437 
1438 #endif
1439 #ifdef PLATFORM_FREEBSD
1440 	DELAY(ms*1000);
1441 	return ;
1442 #endif
1443 
1444 #ifdef PLATFORM_WINDOWS
1445 
1446 	NdisMSleep(ms*1000); //(us)*1000=(ms)
1447 
1448 #endif
1449 
1450 }
1451 
1452 
1453 void rtw_msleep_os(int ms)
1454 {
1455 
1456 #ifdef PLATFORM_LINUX
1457 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
1458 	if (ms < 20) {
1459 		unsigned long us = ms * 1000UL;
1460 		usleep_range(us, us + 1000UL);
1461 	} else
1462 	#endif
1463   	msleep((unsigned int)ms);
1464 
1465 #endif
1466 #ifdef PLATFORM_FREEBSD
1467        //Delay for (ms * 1000) microseconds
1468 	DELAY(ms*1000);
1469 	return ;
1470 #endif
1471 #ifdef PLATFORM_WINDOWS
1472 
1473 	NdisMSleep(ms*1000); //(us)*1000=(ms)
1474 
1475 #endif
1476 
1477 
1478 }
1479 void rtw_usleep_os(int us)
1480 {
1481 #ifdef PLATFORM_LINUX
1482 
1483 	// msleep((unsigned int)us);
1484 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
1485 	usleep_range(us, us + 1);
1486 	#else
1487 	if ( 1 < (us/1000) )
1488       		msleep(1);
1489       else
1490 		msleep( (us/1000) + 1);
1491 	#endif
1492 #endif
1493 
1494 #ifdef PLATFORM_FREEBSD
1495 	//Delay for us microseconds
1496 	DELAY(us);
1497 
1498 	return ;
1499 #endif
1500 #ifdef PLATFORM_WINDOWS
1501 
1502 	NdisMSleep(us); //(us)
1503 
1504 #endif
1505 
1506 
1507 }
1508 
1509 
1510 #ifdef DBG_DELAY_OS
1511 void _rtw_mdelay_os(int ms, const char *func, const int line)
1512 {
1513 	#if 0
1514 	if(ms>10)
1515 		DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
1516 		rtw_msleep_os(ms);
1517 	return;
1518 	#endif
1519 
1520 
1521 	DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
1522 
1523 #if defined(PLATFORM_LINUX)
1524 
1525    	mdelay((unsigned long)ms);
1526 
1527 #elif defined(PLATFORM_WINDOWS)
1528 
1529 	NdisStallExecution(ms*1000); //(us)*1000=(ms)
1530 
1531 #endif
1532 
1533 
1534 }
1535 void _rtw_udelay_os(int us, const char *func, const int line)
1536 {
1537 
1538 	#if 0
1539 	if(us > 1000) {
1540 	DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
1541 		rtw_usleep_os(us);
1542 		return;
1543 	}
1544 	#endif
1545 
1546 
1547 	DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
1548 
1549 
1550 #if defined(PLATFORM_LINUX)
1551 
1552       udelay((unsigned long)us);
1553 
1554 #elif defined(PLATFORM_WINDOWS)
1555 
1556 	NdisStallExecution(us); //(us)
1557 
1558 #endif
1559 
1560 }
1561 #else
1562 void rtw_mdelay_os(int ms)
1563 {
1564 
1565 #ifdef PLATFORM_LINUX
1566 
1567    	mdelay((unsigned long)ms);
1568 
1569 #endif
1570 #ifdef PLATFORM_FREEBSD
1571 	DELAY(ms*1000);
1572 	return ;
1573 #endif
1574 #ifdef PLATFORM_WINDOWS
1575 
1576 	NdisStallExecution(ms*1000); //(us)*1000=(ms)
1577 
1578 #endif
1579 
1580 
1581 }
1582 void rtw_udelay_os(int us)
1583 {
1584 
1585 #ifdef PLATFORM_LINUX
1586 
1587       udelay((unsigned long)us);
1588 
1589 #endif
1590 #ifdef PLATFORM_FREEBSD
1591 	//Delay for us microseconds
1592 	DELAY(us);
1593 	return ;
1594 #endif
1595 #ifdef PLATFORM_WINDOWS
1596 
1597 	NdisStallExecution(us); //(us)
1598 
1599 #endif
1600 
1601 }
1602 #endif
1603 
1604 void rtw_yield_os(void)
1605 {
1606 #ifdef PLATFORM_LINUX
1607 	yield();
1608 #endif
1609 #ifdef PLATFORM_FREEBSD
1610 	yield();
1611 #endif
1612 #ifdef PLATFORM_WINDOWS
1613 	SwitchToThread();
1614 #endif
1615 }
1616 
1617 #define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
1618 #define RTW_SUSPEND_EXT_LOCK_NAME "rtw_wifi_ext"
1619 #define RTW_SUSPEND_RX_LOCK_NAME "rtw_wifi_rx"
1620 #define RTW_SUSPEND_TRAFFIC_LOCK_NAME "rtw_wifi_traffic"
1621 #define RTW_SUSPEND_RESUME_LOCK_NAME "rtw_wifi_resume"
1622 #define RTW_RESUME_SCAN_LOCK_NAME "rtw_wifi_scan"
1623 #ifdef CONFIG_WAKELOCK
1624 static struct wake_lock rtw_suspend_lock;
1625 static struct wake_lock rtw_suspend_ext_lock;
1626 static struct wake_lock rtw_suspend_rx_lock;
1627 static struct wake_lock rtw_suspend_traffic_lock;
1628 static struct wake_lock rtw_suspend_resume_lock;
1629 static struct wake_lock rtw_resume_scan_lock;
1630 #elif defined(CONFIG_ANDROID_POWER)
1631 static android_suspend_lock_t rtw_suspend_lock ={
1632 	.name = RTW_SUSPEND_LOCK_NAME
1633 };
1634 static android_suspend_lock_t rtw_suspend_ext_lock ={
1635 	.name = RTW_SUSPEND_EXT_LOCK_NAME
1636 };
1637 static android_suspend_lock_t rtw_suspend_rx_lock ={
1638 	.name = RTW_SUSPEND_RX_LOCK_NAME
1639 };
1640 static android_suspend_lock_t rtw_suspend_traffic_lock ={
1641 	.name = RTW_SUSPEND_TRAFFIC_LOCK_NAME
1642 };
1643 static android_suspend_lock_t rtw_suspend_resume_lock ={
1644 	.name = RTW_SUSPEND_RESUME_LOCK_NAME
1645 };
1646 static android_suspend_lock_t rtw_resume_scan_lock ={
1647 	.name = RTW_RESUME_SCAN_LOCK_NAME
1648 };
1649 #endif
1650 
1651 inline void rtw_suspend_lock_init(void)
1652 {
1653 	#ifdef CONFIG_WAKELOCK
1654 	wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME);
1655 	wake_lock_init(&rtw_suspend_ext_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_EXT_LOCK_NAME);
1656 	wake_lock_init(&rtw_suspend_rx_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RX_LOCK_NAME);
1657 	wake_lock_init(&rtw_suspend_traffic_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_TRAFFIC_LOCK_NAME);
1658 	wake_lock_init(&rtw_suspend_resume_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RESUME_LOCK_NAME);
1659 	wake_lock_init(&rtw_resume_scan_lock, WAKE_LOCK_SUSPEND, RTW_RESUME_SCAN_LOCK_NAME);
1660 	#elif defined(CONFIG_ANDROID_POWER)
1661 	android_init_suspend_lock(&rtw_suspend_lock);
1662 	android_init_suspend_lock(&rtw_suspend_ext_lock);
1663 	android_init_suspend_lock(&rtw_suspend_rx_lock);
1664 	android_init_suspend_lock(&rtw_suspend_traffic_lock);
1665 	android_init_suspend_lock(&rtw_suspend_resume_lock);
1666 	android_init_suspend_lock(&rtw_resume_scan_lock);
1667 	#endif
1668 }
1669 
1670 inline void rtw_suspend_lock_uninit(void)
1671 {
1672 	#ifdef CONFIG_WAKELOCK
1673 	wake_lock_destroy(&rtw_suspend_lock);
1674 	wake_lock_destroy(&rtw_suspend_ext_lock);
1675 	wake_lock_destroy(&rtw_suspend_rx_lock);
1676 	wake_lock_destroy(&rtw_suspend_traffic_lock);
1677 	wake_lock_destroy(&rtw_suspend_resume_lock);
1678 	wake_lock_destroy(&rtw_resume_scan_lock);
1679 	#elif defined(CONFIG_ANDROID_POWER)
1680 	android_uninit_suspend_lock(&rtw_suspend_lock);
1681 	android_uninit_suspend_lock(&rtw_suspend_ext_lock);
1682 	android_uninit_suspend_lock(&rtw_suspend_rx_lock);
1683 	android_uninit_suspend_lock(&rtw_suspend_traffic_lock);
1684 	android_uninit_suspend_lock(&rtw_suspend_resume_lock);
1685 	android_uninit_suspend_lock(&rtw_resume_scan_lock);
1686 	#endif
1687 }
1688 
1689 inline void rtw_lock_suspend(void)
1690 {
1691 	#ifdef CONFIG_WAKELOCK
1692 	wake_lock(&rtw_suspend_lock);
1693 	#elif defined(CONFIG_ANDROID_POWER)
1694 	android_lock_suspend(&rtw_suspend_lock);
1695 	#endif
1696 
1697 	#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1698 	//DBG_871X("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count);
1699 	#endif
1700 }
1701 
1702 inline void rtw_unlock_suspend(void)
1703 {
1704 	#ifdef CONFIG_WAKELOCK
1705 	wake_unlock(&rtw_suspend_lock);
1706 	#elif defined(CONFIG_ANDROID_POWER)
1707 	android_unlock_suspend(&rtw_suspend_lock);
1708 	#endif
1709 
1710 	#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1711 	//DBG_871X("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count);
1712 	#endif
1713 }
1714 
1715 inline void rtw_resume_lock_suspend(void)
1716 {
1717 	#ifdef CONFIG_WAKELOCK
1718 	wake_lock(&rtw_suspend_resume_lock);
1719 	#elif defined(CONFIG_ANDROID_POWER)
1720 	android_lock_suspend(&rtw_suspend_resume_lock);
1721 	#endif
1722 
1723 	#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1724 	//DBG_871X("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count);
1725 	#endif
1726 }
1727 
1728 inline void rtw_resume_unlock_suspend(void)
1729 {
1730 	#ifdef CONFIG_WAKELOCK
1731 	wake_unlock(&rtw_suspend_resume_lock);
1732 	#elif defined(CONFIG_ANDROID_POWER)
1733 	android_unlock_suspend(&rtw_suspend_resume_lock);
1734 	#endif
1735 
1736 	#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1737 	//DBG_871X("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count);
1738 	#endif
1739 }
1740 
1741 inline void rtw_lock_suspend_timeout(u32 timeout_ms)
1742 {
1743 	#ifdef CONFIG_WAKELOCK
1744 	wake_lock_timeout(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
1745 	#elif defined(CONFIG_ANDROID_POWER)
1746 	android_lock_suspend_auto_expire(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
1747 	#endif
1748 }
1749 
1750 inline void rtw_lock_ext_suspend_timeout(u32 timeout_ms)
1751 {
1752 	#ifdef CONFIG_WAKELOCK
1753 	wake_lock_timeout(&rtw_suspend_ext_lock, rtw_ms_to_systime(timeout_ms));
1754 	#elif defined(CONFIG_ANDROID_POWER)
1755 	android_lock_suspend_auto_expire(&rtw_suspend_ext_lock, rtw_ms_to_systime(timeout_ms));
1756 	#endif
1757 	//DBG_871X("EXT lock timeout:%d\n", timeout_ms);
1758 }
1759 
1760 inline void rtw_lock_rx_suspend_timeout(u32 timeout_ms)
1761 {
1762 	#ifdef CONFIG_WAKELOCK
1763 	wake_lock_timeout(&rtw_suspend_rx_lock, rtw_ms_to_systime(timeout_ms));
1764 	#elif defined(CONFIG_ANDROID_POWER)
1765 	android_lock_suspend_auto_expire(&rtw_suspend_rx_lock, rtw_ms_to_systime(timeout_ms));
1766 	#endif
1767 	//DBG_871X("RX lock timeout:%d\n", timeout_ms);
1768 }
1769 
1770 
1771 inline void rtw_lock_traffic_suspend_timeout(u32 timeout_ms)
1772 {
1773 	#ifdef CONFIG_WAKELOCK
1774 	wake_lock_timeout(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
1775 	#elif defined(CONFIG_ANDROID_POWER)
1776 	android_lock_suspend_auto_expire(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
1777 	#endif
1778 	//DBG_871X("traffic lock timeout:%d\n", timeout_ms);
1779 }
1780 
1781 inline void rtw_lock_resume_scan_timeout(u32 timeout_ms)
1782 {
1783 	#ifdef CONFIG_WAKELOCK
1784 	wake_lock_timeout(&rtw_resume_scan_lock, rtw_ms_to_systime(timeout_ms));
1785 	#elif defined(CONFIG_ANDROID_POWER)
1786 	android_lock_suspend_auto_expire(&rtw_resume_scan_lock, rtw_ms_to_systime(timeout_ms));
1787 	#endif
1788 	//DBG_871X("resume scan lock:%d\n", timeout_ms);
1789 }
1790 
1791 inline void ATOMIC_SET(ATOMIC_T *v, int i)
1792 {
1793 	#ifdef PLATFORM_LINUX
1794 	atomic_set(v,i);
1795 	#elif defined(PLATFORM_WINDOWS)
1796 	*v=i;// other choice????
1797 	#elif defined(PLATFORM_FREEBSD)
1798 	atomic_set_int(v,i);
1799 	#endif
1800 }
1801 
1802 inline int ATOMIC_READ(ATOMIC_T *v)
1803 {
1804 	#ifdef PLATFORM_LINUX
1805 	return atomic_read(v);
1806 	#elif defined(PLATFORM_WINDOWS)
1807 	return *v; // other choice????
1808 	#elif defined(PLATFORM_FREEBSD)
1809 	return atomic_load_acq_32(v);
1810 	#endif
1811 }
1812 
1813 inline void ATOMIC_ADD(ATOMIC_T *v, int i)
1814 {
1815 	#ifdef PLATFORM_LINUX
1816 	atomic_add(i,v);
1817 	#elif defined(PLATFORM_WINDOWS)
1818 	InterlockedAdd(v,i);
1819 	#elif defined(PLATFORM_FREEBSD)
1820 	atomic_add_int(v,i);
1821 	#endif
1822 }
1823 inline void ATOMIC_SUB(ATOMIC_T *v, int i)
1824 {
1825 	#ifdef PLATFORM_LINUX
1826 	atomic_sub(i,v);
1827 	#elif defined(PLATFORM_WINDOWS)
1828 	InterlockedAdd(v,-i);
1829 	#elif defined(PLATFORM_FREEBSD)
1830 	atomic_subtract_int(v,i);
1831 	#endif
1832 }
1833 
1834 inline void ATOMIC_INC(ATOMIC_T *v)
1835 {
1836 	#ifdef PLATFORM_LINUX
1837 	atomic_inc(v);
1838 	#elif defined(PLATFORM_WINDOWS)
1839 	InterlockedIncrement(v);
1840 	#elif defined(PLATFORM_FREEBSD)
1841 	atomic_add_int(v,1);
1842 	#endif
1843 }
1844 
1845 inline void ATOMIC_DEC(ATOMIC_T *v)
1846 {
1847 	#ifdef PLATFORM_LINUX
1848 	atomic_dec(v);
1849 	#elif defined(PLATFORM_WINDOWS)
1850 	InterlockedDecrement(v);
1851 	#elif defined(PLATFORM_FREEBSD)
1852 	atomic_subtract_int(v,1);
1853 	#endif
1854 }
1855 
1856 inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
1857 {
1858 	#ifdef PLATFORM_LINUX
1859 	return atomic_add_return(i,v);
1860 	#elif defined(PLATFORM_WINDOWS)
1861 	return InterlockedAdd(v,i);
1862 	#elif defined(PLATFORM_FREEBSD)
1863 	atomic_add_int(v,i);
1864 	return atomic_load_acq_32(v);
1865 	#endif
1866 }
1867 
1868 inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
1869 {
1870 	#ifdef PLATFORM_LINUX
1871 	return atomic_sub_return(i,v);
1872 	#elif defined(PLATFORM_WINDOWS)
1873 	return InterlockedAdd(v,-i);
1874 	#elif defined(PLATFORM_FREEBSD)
1875 	atomic_subtract_int(v,i);
1876 	return atomic_load_acq_32(v);
1877 	#endif
1878 }
1879 
1880 inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
1881 {
1882 	#ifdef PLATFORM_LINUX
1883 	return atomic_inc_return(v);
1884 	#elif defined(PLATFORM_WINDOWS)
1885 	return InterlockedIncrement(v);
1886 	#elif defined(PLATFORM_FREEBSD)
1887 	atomic_add_int(v,1);
1888 	return atomic_load_acq_32(v);
1889 	#endif
1890 }
1891 
1892 inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
1893 {
1894 	#ifdef PLATFORM_LINUX
1895 	return atomic_dec_return(v);
1896 	#elif defined(PLATFORM_WINDOWS)
1897 	return InterlockedDecrement(v);
1898 	#elif defined(PLATFORM_FREEBSD)
1899 	atomic_subtract_int(v,1);
1900 	return atomic_load_acq_32(v);
1901 	#endif
1902 }
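
/*
 * Usage sketch (editorial addition): the ATOMIC_* wrappers give the driver one
 * spelling for atomic counters across Linux, Windows and FreeBSD. Hypothetical
 * reference counter:
 *
 *	ATOMIC_T refcnt;
 *	ATOMIC_SET(&refcnt, 1);
 *	ATOMIC_INC(&refcnt);
 *	if (ATOMIC_DEC_RETURN(&refcnt) == 0)
 *		;	// last reference dropped, release the object
 */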
1903 
1904 
1905 #ifdef PLATFORM_LINUX
1906 /*
1907 * Open a file with the specified @param path, @param flag, @param mode
1908 * @param fpp pointer to a struct file pointer that receives the opened file on success
1909 * @param path the path of the file to open
1910 * @param flag file operation flags, please refer to the Linux documentation
1911 * @param mode please refer to the Linux documentation
1912 * @return Linux specific error code
1913 */
1914 static int openFile(struct file **fpp, char *path, int flag, int mode)
1915 {
1916 	struct file *fp;
1917 
1918 	fp=filp_open(path, flag, mode);
1919 	if(IS_ERR(fp)) {
1920 		*fpp=NULL;
1921 		return PTR_ERR(fp);
1922 	}
1923 	else {
1924 		*fpp=fp;
1925 		return 0;
1926 	}
1927 }
1928 
1929 /*
1930 * Close the file with the specified @param fp
1931 * @param fp the pointer of struct file to close
1932 * @return always 0
1933 */
1934 static int closeFile(struct file *fp)
1935 {
1936 	filp_close(fp,NULL);
1937 	return 0;
1938 }
1939 
1940 static int readFile(struct file *fp,char *buf,int len)
1941 {
1942 	int rlen=0, sum=0;
1943 
1944 	if (!fp->f_op || !fp->f_op->read)
1945 		return -EPERM;
1946 
1947 	while(sum<len) {
1948 		rlen=fp->f_op->read(fp,buf+sum,len-sum, &fp->f_pos);
1949 		if(rlen>0)
1950 			sum+=rlen;
1951 		else if(0 != rlen)
1952 			return rlen;
1953 		else
1954 			break;
1955 	}
1956 
1957 	return  sum;
1958 
1959 }
1960 
1961 static int writeFile(struct file *fp,char *buf,int len)
1962 {
1963 	int wlen=0, sum=0;
1964 
1965 	if (!fp->f_op || !fp->f_op->write)
1966 		return -EPERM;
1967 
1968 	while(sum<len) {
1969 		wlen=fp->f_op->write(fp,buf+sum,len-sum, &fp->f_pos);
1970 		if(wlen>0)
1971 			sum+=wlen;
1972 		else if(0 != wlen)
1973 			return wlen;
1974 		else
1975 			break;
1976 	}
1977 
1978 	return sum;
1979 
1980 }
1981 
1982 /*
1983 * Test if the specified @param path is a file and readable
1984 * @param path the path of the file to test
1985 * @return Linux specific error code
1986 */
1987 static int isFileReadable(char *path)
1988 {
1989 	struct file *fp;
1990 	int ret = 0;
1991 	mm_segment_t oldfs;
1992 	char buf;
1993 
1994 	fp=filp_open(path, O_RDONLY, 0);
1995 	if(IS_ERR(fp)) {
1996 		ret = PTR_ERR(fp);
1997 	}
1998 	else {
1999 		oldfs = get_fs(); set_fs(KERNEL_DS);
2000 
2001 		if(1!=readFile(fp, &buf, 1))
2002 			ret = -1; /* short read: report failure (PTR_ERR() on a valid fp would be meaningless) */
2003 
2004 		set_fs(oldfs);
2005 		filp_close(fp,NULL);
2006 	}
2007 	return ret;
2008 }
2009 
2010 /*
2011 * Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
2012 * @param path the path of the file to open and read
2013 * @param buf the starting address of the buffer to store file content
2014 * @param sz how many bytes to read at most
2015 * @return the number of bytes read, or a Linux-specific error code
2016 */
2017 static int retriveFromFile(char *path, u8* buf, u32 sz)
2018 {
2019 	int ret =-1;
2020 	mm_segment_t oldfs;
2021 	struct file *fp;
2022 
2023 	if(path && buf) {
2024 		if( 0 == (ret=openFile(&fp,path, O_RDONLY, 0)) ){
2025 			DBG_871X("%s openFile path:%s fp=%p\n",__FUNCTION__, path ,fp);
2026 
2027 			oldfs = get_fs(); set_fs(KERNEL_DS);
2028 			ret=readFile(fp, buf, sz);
2029 			set_fs(oldfs);
2030 			closeFile(fp);
2031 
2032 			DBG_871X("%s readFile, ret:%d\n",__FUNCTION__, ret);
2033 
2034 		} else {
2035 			DBG_871X("%s openFile path:%s Fail, ret:%d\n",__FUNCTION__, path, ret);
2036 		}
2037 	} else {
2038 		DBG_871X("%s NULL pointer\n",__FUNCTION__);
2039 		ret =  -EINVAL;
2040 	}
2041 	return ret;
2042 }
2043 
2044 /*
2045 * Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
2046 * @param path the path of the file to open and write
2047 * @param buf the starting address of the data to write into file
2048 * @param sz how many bytes to write at most
2049 * @return the number of bytes written, or a Linux-specific error code
2050 */
2051 static int storeToFile(char *path, u8* buf, u32 sz)
2052 {
2053 	int ret =0;
2054 	mm_segment_t oldfs;
2055 	struct file *fp;
2056 
2057 	if(path && buf) {
2058 		if( 0 == (ret=openFile(&fp, path, O_CREAT|O_WRONLY, 0666)) ) {
2059 			DBG_871X("%s openFile path:%s fp=%p\n",__FUNCTION__, path ,fp);
2060 
2061 			oldfs = get_fs(); set_fs(KERNEL_DS);
2062 			ret=writeFile(fp, buf, sz);
2063 			set_fs(oldfs);
2064 			closeFile(fp);
2065 
2066 			DBG_871X("%s writeFile, ret:%d\n",__FUNCTION__, ret);
2067 
2068 		} else {
2069 			DBG_871X("%s openFile path:%s Fail, ret:%d\n",__FUNCTION__, path, ret);
2070 		}
2071 	} else {
2072 		DBG_871X("%s NULL pointer\n",__FUNCTION__);
2073 		ret =  -EINVAL;
2074 	}
2075 	return ret;
2076 }
2077 #endif //PLATFORM_LINUX
2078 
2079 /*
2080 * Test if the specified @param path is a file and is readable
2081 * @param path the path of the file to test
2082 * @return _TRUE or _FALSE
2083 */
2084 int rtw_is_file_readable(char *path)
2085 {
2086 #ifdef PLATFORM_LINUX
2087 	if(isFileReadable(path) == 0)
2088 		return _TRUE;
2089 	else
2090 		return _FALSE;
2091 #else
2092 	//Todo...
2093 	return _FALSE;
2094 #endif
2095 }
2096 
2097 /*
2098 * Open the file at @param path and retrieve its content into memory starting at @param buf, reading at most @param sz bytes
2099 * @param path the path of the file to open and read
2100 * @param buf the starting address of the buffer to store file content
2101 * @param sz how many bytes to read at most
2102 * @return the number of bytes read (0 on error)
2103 */
2104 int rtw_retrieve_from_file(char *path, u8 *buf, u32 sz)
2105 {
2106 #ifdef PLATFORM_LINUX
2107 	int ret =retriveFromFile(path, buf, sz);
2108 	return ret>=0?ret:0;
2109 #else
2110 	//Todo...
2111 	return 0;
2112 #endif
2113 }
2114 
2115 /*
2116 * Open the file at @param path and write @param sz bytes of data starting from @param buf into the file
2117 * @param path the path of the file to open and write
2118 * @param buf the starting address of the data to write into file
2119 * @param sz how many bytes to write at most
2120 * @return the number of bytes written (0 on error)
2121 */
2122 int rtw_store_to_file(char *path, u8 *buf, u32 sz)
2123 {
2124 #ifdef PLATFORM_LINUX
2125 	int ret =storeToFile(path, buf, sz);
2126 	return ret>=0?ret:0;
2127 #else
2128 	//Todo...
2129 	return 0;
2130 #endif
2131 }
2132 
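/*
 * Illustrative usage sketch (not part of the driver): how the three wrappers
 * above might be combined to load a parameter file into a caller-provided
 * buffer. The path and buffer size below are made up for the example.
 *
 *	u8 data[256];
 *	int got = 0;
 *
 *	if (rtw_is_file_readable("/data/wifi_param.bin") == _TRUE)
 *		got = rtw_retrieve_from_file("/data/wifi_param.bin", data, sizeof(data));
 *
 *	if (got > 0)
 *		rtw_store_to_file("/data/wifi_param.bak", data, got);
 */
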
2133 #ifdef PLATFORM_LINUX
2134 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
2135 {
2136 	struct net_device *pnetdev;
2137 	struct rtw_netdev_priv_indicator *pnpi;
2138 
2139 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
2140 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
2141 #else
2142 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
2143 #endif
2144 	if (!pnetdev)
2145 		goto RETURN;
2146 
2147 	pnpi = netdev_priv(pnetdev);
2148 	pnpi->priv=old_priv;
2149 	pnpi->sizeof_priv=sizeof_priv;
2150 
2151 RETURN:
2152 	return pnetdev;
2153 }
2154 
2155 struct net_device *rtw_alloc_etherdev(int sizeof_priv)
2156 {
2157 	struct net_device *pnetdev;
2158 	struct rtw_netdev_priv_indicator *pnpi;
2159 
2160 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
2161 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
2162 #else
2163 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
2164 #endif
2165 	if (!pnetdev)
2166 		goto RETURN;
2167 
2168 	pnpi = netdev_priv(pnetdev);
2169 
2170 	pnpi->priv = rtw_zvmalloc(sizeof_priv);
2171 	if (!pnpi->priv) {
2172 		free_netdev(pnetdev);
2173 		pnetdev = NULL;
2174 		goto RETURN;
2175 	}
2176 
2177 	pnpi->sizeof_priv=sizeof_priv;
2178 RETURN:
2179 	return pnetdev;
2180 }
2181 
2182 void rtw_free_netdev(struct net_device *netdev)
2183 {
2184 	struct rtw_netdev_priv_indicator *pnpi;
2185 
2186 	if(!netdev)
2187 		goto RETURN;
2188 
2189 	pnpi = netdev_priv(netdev);
2190 
2191 	if(!pnpi->priv)
2192 		goto RETURN;
2193 
2194 	free_netdev(netdev);
2195 
2196 RETURN:
2197 	return;
2198 }
2199 
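/*
 * Illustrative note (not part of the driver): netdev_priv() here returns the
 * small rtw_netdev_priv_indicator rather than the driver private area itself,
 * so callers reach the real private data through one extra dereference,
 * roughly:
 *
 *	struct rtw_netdev_priv_indicator *pnpi = netdev_priv(pnetdev);
 *	void *drv_priv = pnpi->priv;	// buffer from rtw_zvmalloc() or old_priv
 */
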
2200 /*
2201 * Jeff: this function should be called under ioctl (rtnl_lock is acquired) while
2202 * LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
2203 */
2204 int rtw_change_ifname(_adapter *padapter, const char *ifname)
2205 {
2206 	struct net_device *pnetdev;
2207 	struct net_device *cur_pnetdev;
2208 	struct rereg_nd_name_data *rereg_priv;
2209 	int ret;
2210 
2211 	if(!padapter)
2212 		goto error;
2213 
2214 	cur_pnetdev = padapter->pnetdev;
2215 	rereg_priv = &padapter->rereg_nd_name_priv;
2216 
2217 	//free the old_pnetdev
2218 	if(rereg_priv->old_pnetdev) {
2219 		free_netdev(rereg_priv->old_pnetdev);
2220 		rereg_priv->old_pnetdev = NULL;
2221 	}
2222 
2223 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2224 	if(!rtnl_is_locked())
2225 		unregister_netdev(cur_pnetdev);
2226 	else
2227 #endif
2228 		unregister_netdevice(cur_pnetdev);
2229 
2230 	rereg_priv->old_pnetdev=cur_pnetdev;
2231 
2232 	pnetdev = rtw_init_netdev(padapter);
2233 	if (!pnetdev)  {
2234 		ret = -1;
2235 		goto error;
2236 	}
2237 
2238 	SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
2239 
2240 	rtw_init_netdev_name(pnetdev, ifname);
2241 
2242 	_rtw_memcpy(pnetdev->dev_addr, adapter_mac_addr(padapter), ETH_ALEN);
2243 
2244 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
2245 	if(!rtnl_is_locked())
2246 		ret = register_netdev(pnetdev);
2247 	else
2248 #endif
2249 		ret = register_netdevice(pnetdev);
2250 
2251 	if ( ret != 0) {
2252 		RT_TRACE(_module_hci_intfs_c_,_drv_err_,("register_netdev() failed\n"));
2253 		goto error;
2254 	}
2255 
2256 	return 0;
2257 
2258 error:
2259 
2260 	return -1;
2261 
2262 }
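
/*
 * Illustrative usage sketch (not part of the driver): per Jeff's note above,
 * on kernels older than 2.6.26 the rename is expected to run with rtnl_lock
 * already held, as in an ioctl handler. The interface name below is made up.
 *
 *	rtnl_lock();
 *	ret = rtw_change_ifname(padapter, "wlan1");
 *	rtnl_unlock();
 */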
2263 #endif
2264 
2265 #ifdef PLATFORM_FREEBSD
2266 /*
2267  * Copy a buffer from userspace and write into kernel address
2268  * space.
2269  *
2270  * This emulation just calls the FreeBSD copyin function (to
2271  * copy data from user space buffer into a kernel space buffer)
2272  * and is designed to be used with the above io_write_wrapper.
2273  *
2274  * This function should return the number of bytes not copied.
2275  * I.e. success results in a zero value.
2276  * Negative error values are not returned.
2277  */
2278 unsigned long
2279 copy_from_user(void *to, const void *from, unsigned long n)
2280 {
2281         if ( copyin(from, to, n) != 0 ) {
2282                 /* Any errors will be treated as a failure
2283                    to copy any of the requested bytes */
2284                 return n;
2285         }
2286 
2287         return 0;
2288 }
2289 
2290 unsigned long
2291 copy_to_user(void *to, const void *from, unsigned long n)
2292 {
2293 	if ( copyout(from, to, n) != 0 ) {
2294 		/* Any errors will be treated as a failure
2295 		   to copy any of the requested bytes */
2296 		return n;
2297 	}
2298 
2299 	return 0;
2300 }
2301 
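/*
 * Illustrative usage sketch (not part of the driver): as with the Linux
 * originals, a non-zero return means some bytes were not copied, so callers
 * typically translate it into -EFAULT. kbuf/ubuf/len are made-up names.
 *
 *	if (copy_from_user(kbuf, ubuf, len) != 0)
 *		return -EFAULT;
 */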
2302 
2303 /*
2304  * The usb_register and usb_deregister functions are used to register
2305  * usb drivers with the usb subsystem. In this compatibility layer
2306  * emulation a list of drivers (struct usb_driver) is maintained
2307  * and is used for probing/attaching etc.
2308  *
2309  * usb_register and usb_deregister simply call these functions.
2310  */
2311 int
2312 usb_register(struct usb_driver *driver)
2313 {
2314         rtw_usb_linux_register(driver);
2315         return 0;
2316 }
2317 
2318 
2319 int
2320 usb_deregister(struct usb_driver *driver)
2321 {
2322         rtw_usb_linux_deregister(driver);
2323         return 0;
2324 }
2325 
2326 void module_init_exit_wrapper(void *arg)
2327 {
2328         int (*func)(void) = arg;
2329         func();
2330         return;
2331 }
2332 
2333 #endif //PLATFORM_FREEBSD
2334 
2335 #ifdef CONFIG_PLATFORM_SPRD
2336 #ifdef do_div
2337 #undef do_div
2338 #endif
2339 #include <asm-generic/div64.h>
2340 #endif
2341 
2342 u64 rtw_modular64(u64 x, u64 y)
2343 {
2344 #ifdef PLATFORM_LINUX
2345 	return do_div(x, y);
2346 #elif defined(PLATFORM_WINDOWS)
2347 	return (x % y);
2348 #elif defined(PLATFORM_FREEBSD)
2349 	return (x %y);
2350 #endif
2351 }
2352 
2353 u64 rtw_division64(u64 x, u64 y)
2354 {
2355 #ifdef PLATFORM_LINUX
2356 	do_div(x, y);
2357 	return x;
2358 #elif defined(PLATFORM_WINDOWS)
2359 	return (x / y);
2360 #elif defined(PLATFORM_FREEBSD)
2361 	return (x / y);
2362 #endif
2363 }
2364 
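/*
 * Illustrative note (not part of the driver): on Linux, do_div() stores the
 * quotient back into its first argument and returns the remainder, which is
 * why the two wrappers above differ only in what they hand back, e.g.:
 *
 *	u64 q = rtw_division64(10000000000ULL, 3);	// q == 3333333333
 *	u64 r = rtw_modular64(10000000000ULL, 3);	// r == 1
 */
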
2365 inline u32 rtw_random32(void)
2366 {
2367 #ifdef PLATFORM_LINUX
2368 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
2369 	return prandom_u32();
2370 	#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18))
2371 	u32 random_int;
2372 	get_random_bytes( &random_int , 4 );
2373 	return random_int;
2374 	#else
2375 	return random32();
2376 	#endif
2377 #elif defined(PLATFORM_WINDOWS)
2378 	#error "to be implemented\n"
2379 #elif defined(PLATFORM_FREEBSD)
2380 	#error "to be implemented\n"
2381 #endif
2382 }
2383 
2384 void rtw_buf_free(u8 **buf, u32 *buf_len)
2385 {
2386 	u32 ori_len;
2387 
2388 	if (!buf || !buf_len)
2389 		return;
2390 
2391 	ori_len = *buf_len;
2392 
2393 	if (*buf) {
2394 		u32 tmp_buf_len = *buf_len;
2395 		*buf_len = 0;
2396 		rtw_mfree(*buf, tmp_buf_len);
2397 		*buf = NULL;
2398 	}
2399 }
2400 
2401 void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
2402 {
2403 	u32 ori_len = 0, dup_len = 0;
2404 	u8 *ori = NULL;
2405 	u8 *dup = NULL;
2406 
2407 	if (!buf || !buf_len)
2408 		return;
2409 
2410 	if (!src || !src_len)
2411 		goto keep_ori;
2412 
2413 	/* duplicate src */
2414 	dup = rtw_malloc(src_len);
2415 	if (dup) {
2416 		dup_len = src_len;
2417 		_rtw_memcpy(dup, src, dup_len);
2418 	}
2419 
2420 keep_ori:
2421 	ori = *buf;
2422 	ori_len = *buf_len;
2423 
2424 	/* replace buf with dup */
2425 	*buf_len = 0;
2426 	*buf = dup;
2427 	*buf_len = dup_len;
2428 
2429 	/* free ori */
2430 	if (ori && ori_len > 0)
2431 		rtw_mfree(ori, ori_len);
2432 }
2433 
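/*
 * Illustrative usage sketch (not part of the driver): the pair above is meant
 * for keeping a heap copy of a variable-length blob; new_data and new_len are
 * made-up names for the example.
 *
 *	u8 *cached = NULL;
 *	u32 cached_len = 0;
 *
 *	rtw_buf_update(&cached, &cached_len, new_data, new_len);  // replace the copy
 *	...
 *	rtw_buf_free(&cached, &cached_len);  // free and reset to NULL/0
 */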
2434 
2435 /**
2436  * rtw_cbuf_full - test if cbuf is full
2437  * @cbuf: pointer of struct rtw_cbuf
2438  *
2439  * Returns: _TRUE if cbuf is full
2440  */
2441 inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
2442 {
2443 	return (((cbuf->write + 1) % cbuf->size) == cbuf->read) ? _TRUE : _FALSE; /* wrap-safe full check; one slot kept free */
2444 }
2445 
2446 /**
2447  * rtw_cbuf_empty - test if cbuf is empty
2448  * @cbuf: pointer of struct rtw_cbuf
2449  *
2450  * Returns: _TRUE if cbuf is empty
2451  */
2452 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
2453 {
2454 	return (cbuf->write == cbuf->read)? _TRUE : _FALSE;
2455 }
2456 
2457 /**
2458  * rtw_cbuf_push - push a pointer into cbuf
2459  * @cbuf: pointer of struct rtw_cbuf
2460  * @buf: pointer to push in
2461  *
2462  * Lock-free operation; be careful of the usage scheme
2463  * Returns: _SUCCESS if the push succeeds, _FAIL if the cbuf is full
2464  */
2465 bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
2466 {
2467 	if (rtw_cbuf_full(cbuf))
2468 		return _FAIL;
2469 
2470 	if (0)
2471 		DBG_871X("%s on %u\n", __func__, cbuf->write);
2472 	cbuf->bufs[cbuf->write] = buf;
2473 	cbuf->write = (cbuf->write+1)%cbuf->size;
2474 
2475 	return _SUCCESS;
2476 }
2477 
2478 /**
2479  * rtw_cbuf_pop - pop a pointer from cbuf
2480  * @cbuf: pointer of struct rtw_cbuf
2481  *
2482  * Lock-free operation; be careful of the usage scheme
2483  * Returns: the pointer popped out, or NULL if the cbuf is empty
2484  */
2485 void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
2486 {
2487 	void *buf;
2488 	if (rtw_cbuf_empty(cbuf))
2489 		return NULL;
2490 
2491 	if (0)
2492 		DBG_871X("%s on %u\n", __func__, cbuf->read);
2493 	buf = cbuf->bufs[cbuf->read];
2494 	cbuf->read = (cbuf->read+1)%cbuf->size;
2495 
2496 	return buf;
2497 }
2498 
2499 /**
2500  * rtw_cbuf_alloc - allocate a rtw_cbuf of the given size and initialize it
2501  * @size: number of pointer slots in the buffer
2502  *
2503  * Returns: pointer of struct rtw_cbuf, NULL for allocation failure
2504  */
2505 struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
2506 {
2507 	struct rtw_cbuf *cbuf;
2508 
2509 	cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void*)*size);
2510 
2511 	if (cbuf) {
2512 		cbuf->write = cbuf->read = 0;
2513 		cbuf->size = size;
2514 	}
2515 
2516 	return cbuf;
2517 }
2518 
2519 /**
2520  * rtw_cbuf_free - free the given rtw_cbuf
2521  * @cbuf: pointer of struct rtw_cbuf to free
2522  */
2523 void rtw_cbuf_free(struct rtw_cbuf *cbuf)
2524 {
2525 	rtw_mfree((u8*)cbuf, sizeof(*cbuf) + sizeof(void*)*cbuf->size);
2526 }
2527 
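/*
 * Illustrative usage sketch (not part of the driver): a minimal round trip
 * through the circular-buffer helpers above; the queue depth is arbitrary and
 * some_ptr is a placeholder.
 *
 *	struct rtw_cbuf *q = rtw_cbuf_alloc(8);
 *
 *	if (q) {
 *		if (rtw_cbuf_push(q, some_ptr) == _SUCCESS) {
 *			void *p = rtw_cbuf_pop(q);	// returns some_ptr, or NULL if empty
 *		}
 *		rtw_cbuf_free(q);
 *	}
 */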
2528