xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8188fu/os_dep/osdep_service.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2017 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 
16 
17 #define _OSDEP_SERVICE_C_
18 
19 #include <drv_types.h>
20 
21 #define RT_TAG	'1178'
22 
23 #ifdef DBG_MEMORY_LEAK
24 #ifdef PLATFORM_LINUX
25 atomic_t _malloc_cnt = ATOMIC_INIT(0);
26 atomic_t _malloc_size = ATOMIC_INIT(0);
27 #endif
28 #endif /* DBG_MEMORY_LEAK */
29 
30 
31 #if defined(PLATFORM_LINUX)
32 /*
33 * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
34 * @return: one of RTW_STATUS_CODE
35 */
36 inline int RTW_STATUS_CODE(int error_code)
37 {
38 	if (error_code >= 0)
39 		return _SUCCESS;
40 
41 	switch (error_code) {
42 	/* case -ETIMEDOUT: */
43 	/*	return RTW_STATUS_TIMEDOUT; */
44 	default:
45 		return _FAIL;
46 	}
47 }
48 #else
49 inline int RTW_STATUS_CODE(int error_code)
50 {
51 	return error_code;
52 }
53 #endif
54 
55 u32 rtw_atoi(u8 *s)
56 {
57 
58 	int num = 0, flag = 0;
59 	int i;
60 	for (i = 0; i <= strlen(s); i++) {
61 		if (s[i] >= '0' && s[i] <= '9')
62 			num = num * 10 + s[i] - '0';
63 		else if (s[0] == '-' && i == 0)
64 			flag = 1;
65 		else
66 			break;
67 	}
68 
69 	if (flag == 1)
70 		num = num * -1;
71 
72 	return num;
73 
74 }
75 
76 inline void *_rtw_vmalloc(u32 sz)
77 {
78 	void *pbuf;
79 #ifdef PLATFORM_LINUX
80 	pbuf = vmalloc(sz);
81 #endif
82 #ifdef PLATFORM_FREEBSD
83 	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
84 #endif
85 
86 #ifdef PLATFORM_WINDOWS
87 	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
88 #endif
89 
90 #ifdef DBG_MEMORY_LEAK
91 #ifdef PLATFORM_LINUX
92 	if (pbuf != NULL) {
93 		atomic_inc(&_malloc_cnt);
94 		atomic_add(sz, &_malloc_size);
95 	}
96 #endif
97 #endif /* DBG_MEMORY_LEAK */
98 
99 	return pbuf;
100 }
101 
102 inline void *_rtw_zvmalloc(u32 sz)
103 {
104 	void *pbuf;
105 #ifdef PLATFORM_LINUX
106 	pbuf = _rtw_vmalloc(sz);
107 	if (pbuf != NULL)
108 		memset(pbuf, 0, sz);
109 #endif
110 #ifdef PLATFORM_FREEBSD
111 	pbuf = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
112 #endif
113 #ifdef PLATFORM_WINDOWS
114 	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
115 	if (pbuf != NULL)
116 		NdisFillMemory(pbuf, sz, 0);
117 #endif
118 
119 	return pbuf;
120 }
121 
122 inline void _rtw_vmfree(void *pbuf, u32 sz)
123 {
124 #ifdef PLATFORM_LINUX
125 	vfree(pbuf);
126 #endif
127 #ifdef PLATFORM_FREEBSD
128 	free(pbuf, M_DEVBUF);
129 #endif
130 #ifdef PLATFORM_WINDOWS
131 	NdisFreeMemory(pbuf, sz, 0);
132 #endif
133 
134 #ifdef DBG_MEMORY_LEAK
135 #ifdef PLATFORM_LINUX
136 	atomic_dec(&_malloc_cnt);
137 	atomic_sub(sz, &_malloc_size);
138 #endif
139 #endif /* DBG_MEMORY_LEAK */
140 }
141 
142 void *_rtw_malloc(u32 sz)
143 {
144 	void *pbuf = NULL;
145 
146 #ifdef PLATFORM_LINUX
147 #ifdef RTK_DMP_PLATFORM
148 	if (sz > 0x4000)
149 		pbuf = dvr_malloc(sz);
150 	else
151 #endif
152 		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
153 
154 #endif
155 #ifdef PLATFORM_FREEBSD
156 	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
157 #endif
158 #ifdef PLATFORM_WINDOWS
159 
160 	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
161 
162 #endif
163 
164 #ifdef DBG_MEMORY_LEAK
165 #ifdef PLATFORM_LINUX
166 	if (pbuf != NULL) {
167 		atomic_inc(&_malloc_cnt);
168 		atomic_add(sz, &_malloc_size);
169 	}
170 #endif
171 #endif /* DBG_MEMORY_LEAK */
172 
173 	return pbuf;
174 
175 }
176 
177 
178 void *_rtw_zmalloc(u32 sz)
179 {
180 #ifdef PLATFORM_FREEBSD
181 	return malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
182 #else /* PLATFORM_FREEBSD */
183 	void *pbuf = _rtw_malloc(sz);
184 
185 	if (pbuf != NULL) {
186 
187 #ifdef PLATFORM_LINUX
188 		memset(pbuf, 0, sz);
189 #endif
190 
191 #ifdef PLATFORM_WINDOWS
192 		NdisFillMemory(pbuf, sz, 0);
193 #endif
194 
195 	}
196 
197 	return pbuf;
198 #endif /* PLATFORM_FREEBSD */
199 }
200 
201 void _rtw_mfree(void *pbuf, u32 sz)
202 {
203 
204 #ifdef PLATFORM_LINUX
205 #ifdef RTK_DMP_PLATFORM
206 	if (sz > 0x4000)
207 		dvr_free(pbuf);
208 	else
209 #endif
210 		kfree(pbuf);
211 
212 #endif
213 #ifdef PLATFORM_FREEBSD
214 	free(pbuf, M_DEVBUF);
215 #endif
216 #ifdef PLATFORM_WINDOWS
217 
218 	NdisFreeMemory(pbuf, sz, 0);
219 
220 #endif
221 
222 #ifdef DBG_MEMORY_LEAK
223 #ifdef PLATFORM_LINUX
224 	atomic_dec(&_malloc_cnt);
225 	atomic_sub(sz, &_malloc_size);
226 #endif
227 #endif /* DBG_MEMORY_LEAK */
228 
229 }
230 
231 #ifdef PLATFORM_FREEBSD
232 /* review again */
233 struct sk_buff *dev_alloc_skb(unsigned int size)
234 {
235 	struct sk_buff *skb = NULL;
236 	u8 *data = NULL;
237 
238 	/* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */
239 	skb = _rtw_malloc(sizeof(struct sk_buff));
240 	if (!skb)
241 		goto out;
242 	data = _rtw_malloc(size);
243 	if (!data)
244 		goto nodata;
245 
246 	skb->head = (unsigned char *)data;
247 	skb->data = (unsigned char *)data;
248 	skb->tail = (unsigned char *)data;
249 	skb->end = (unsigned char *)data + size;
250 	skb->len = 0;
251 	/* printf("%s()-%d: skb=%p, skb->head = %p\n", __FUNCTION__, __LINE__, skb, skb->head); */
252 
253 out:
254 	return skb;
255 nodata:
256 	_rtw_mfree(skb, sizeof(struct sk_buff));
257 	skb = NULL;
258 	goto out;
259 
260 }
261 
262 void dev_kfree_skb_any(struct sk_buff *skb)
263 {
264 	/* printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head); */
265 	if (skb->head)
266 		_rtw_mfree(skb->head, 0);
267 	/* printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb); */
268 	if (skb)
269 		_rtw_mfree(skb, 0);
270 }
271 struct sk_buff *skb_clone(const struct sk_buff *skb)
272 {
273 	return NULL;
274 }
275 
276 #endif /* PLATFORM_FREEBSD */
277 
278 inline struct sk_buff *_rtw_skb_alloc(u32 sz)
279 {
280 #ifdef PLATFORM_LINUX
281 	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
282 #endif /* PLATFORM_LINUX */
283 
284 #ifdef PLATFORM_FREEBSD
285 	return dev_alloc_skb(sz);
286 #endif /* PLATFORM_FREEBSD */
287 }
288 
289 inline void _rtw_skb_free(struct sk_buff *skb)
290 {
291 	dev_kfree_skb_any(skb);
292 }
293 
294 inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
295 {
296 #ifdef PLATFORM_LINUX
297 	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
298 #endif /* PLATFORM_LINUX */
299 
300 #ifdef PLATFORM_FREEBSD
301 	return NULL;
302 #endif /* PLATFORM_FREEBSD */
303 }
304 
305 inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
306 {
307 #ifdef PLATFORM_LINUX
308 	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
309 #endif /* PLATFORM_LINUX */
310 
311 #ifdef PLATFORM_FREEBSD
312 	return skb_clone(skb);
313 #endif /* PLATFORM_FREEBSD */
314 }
315 inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
316 {
317 #ifdef PLATFORM_LINUX
318 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
319 	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
320 #else
321 	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
322 #endif
323 #endif /* PLATFORM_LINUX */
324 
325 #ifdef PLATFORM_FREEBSD
326 	return NULL;
327 #endif /* PLATFORM_FREEBSD */
328 }
329 
330 inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
331 {
332 #if defined(PLATFORM_LINUX)
333 	skb->dev = ndev;
334 	return netif_rx(skb);
335 #elif defined(PLATFORM_FREEBSD)
336 	return (*ndev->if_input)(ndev, skb);
337 #else
338 	rtw_warn_on(1);
339 	return -1;
340 #endif
341 }
342 
343 #ifdef CONFIG_RTW_NAPI
344 inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
345 {
346 #if defined(PLATFORM_LINUX)
347 	skb->dev = ndev;
348 	return netif_receive_skb(skb);
349 #else
350 	rtw_warn_on(1);
351 	return -1;
352 #endif
353 }
354 
355 #ifdef CONFIG_RTW_GRO
356 inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
357 {
358 #if defined(PLATFORM_LINUX)
359 	return napi_gro_receive(napi, skb);
360 #else
361 	rtw_warn_on(1);
362 	return -1;
363 #endif
364 }
365 #endif /* CONFIG_RTW_GRO */
366 #endif /* CONFIG_RTW_NAPI */
367 
368 void _rtw_skb_queue_purge(struct sk_buff_head *list)
369 {
370 	struct sk_buff *skb;
371 
372 	while ((skb = skb_dequeue(list)) != NULL)
373 		_rtw_skb_free(skb);
374 }
375 
376 #ifdef CONFIG_USB_HCI
377 inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
378 {
379 #ifdef PLATFORM_LINUX
380 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
381 	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
382 #else
383 	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
384 #endif
385 #endif /* PLATFORM_LINUX */
386 
387 #ifdef PLATFORM_FREEBSD
388 	return malloc(size, M_USBDEV, M_NOWAIT | M_ZERO);
389 #endif /* PLATFORM_FREEBSD */
390 }
391 inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
392 {
393 #ifdef PLATFORM_LINUX
394 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
395 	usb_free_coherent(dev, size, addr, dma);
396 #else
397 	usb_buffer_free(dev, size, addr, dma);
398 #endif
399 #endif /* PLATFORM_LINUX */
400 
401 #ifdef PLATFORM_FREEBSD
402 	free(addr, M_USBDEV);
403 #endif /* PLATFORM_FREEBSD */
404 }
405 #endif /* CONFIG_USB_HCI */
406 
407 #if defined(DBG_MEM_ALLOC)
408 
409 struct rtw_mem_stat {
410 	ATOMIC_T alloc; /* bytes currently allocated */
411 	ATOMIC_T peak; /* peak number of bytes allocated */
412 	ATOMIC_T alloc_cnt; /* number of allocations currently outstanding */
413 	ATOMIC_T alloc_err_cnt; /* number of failed allocation attempts */
414 };
415 
416 struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
417 #ifdef RTW_MEM_FUNC_STAT
418 struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
419 #endif
420 
421 char *MSTAT_TYPE_str[] = {
422 	"VIR",
423 	"PHY",
424 	"SKB",
425 	"USB",
426 };
427 
428 #ifdef RTW_MEM_FUNC_STAT
429 char *MSTAT_FUNC_str[] = {
430 	"UNSP",
431 	"IO",
432 	"TXIO",
433 	"RXIO",
434 	"TX",
435 	"RX",
436 };
437 #endif
438 
439 void rtw_mstat_dump(void *sel)
440 {
441 	int i;
442 	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
443 #ifdef RTW_MEM_FUNC_STAT
444 	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
445 #endif
446 
447 	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
448 		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
449 		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
450 		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
451 		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
452 	}
453 
454 #ifdef RTW_MEM_FUNC_STAT
455 	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
456 		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
457 		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
458 		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
459 		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
460 	}
461 #endif
462 
463 	RTW_PRINT_SEL(sel, "===================== MSTAT =====================\n");
464 	RTW_PRINT_SEL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "alloc_cnt", "err_cnt");
465 	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
466 	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++)
467 		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
468 #ifdef RTW_MEM_FUNC_STAT
469 	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
470 	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++)
471 		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
472 #endif
473 }
474 
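/*
 * Update the DBG_MEM_ALLOC accounting for one allocation event: bump
 * alloc/alloc_cnt and track the peak on success, count failures in
 * alloc_err_cnt, and reverse the counters on MSTAT_FREE.
 */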
475 void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
476 {
477 	static systime update_time = 0;
478 	int peak, alloc;
479 	int i;
480 
481 	/* initialization */
482 	if (!update_time) {
483 		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
484 			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
485 			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
486 			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
487 			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
488 		}
489 		#ifdef RTW_MEM_FUNC_STAT
490 		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
491 			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
492 			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
493 			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
494 			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
495 		}
496 		#endif
497 	}
498 
499 	switch (status) {
500 	case MSTAT_ALLOC_SUCCESS:
501 		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
502 		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
503 		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
504 		if (peak < alloc)
505 			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);
506 
507 		#ifdef RTW_MEM_FUNC_STAT
508 		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
509 		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
510 		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
511 		if (peak < alloc)
512 			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
513 		#endif
514 		break;
515 
516 	case MSTAT_ALLOC_FAIL:
517 		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
518 		#ifdef RTW_MEM_FUNC_STAT
519 		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
520 		#endif
521 		break;
522 
523 	case MSTAT_FREE:
524 		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
525 		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
526 		#ifdef RTW_MEM_FUNC_STAT
527 		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
528 		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
529 		#endif
530 		break;
531 	};
532 
533 	/* if (rtw_get_passing_time_ms(update_time) > 5000) { */
534 	/*	rtw_mstat_dump(RTW_DBGDUMP); */
535 	update_time = rtw_get_current_time();
536 	/* } */
537 }
538 
539 #ifndef SIZE_MAX
540 	#define SIZE_MAX (~(size_t)0)
541 #endif
542 
543 struct mstat_sniff_rule {
544 	enum mstat_f flags;
545 	size_t lb;
546 	size_t hb;
547 };
548 
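/*
 * Sniff rules: the dbg_rtw_* wrappers log any allocation whose flags match a
 * rule and whose size falls within that rule's [lb, hb] range.
 */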
549 struct mstat_sniff_rule mstat_sniff_rules[] = {
550 	{MSTAT_TYPE_PHY, 4097, SIZE_MAX},
551 };
552 
553 int mstat_sniff_rule_num = sizeof(mstat_sniff_rules) / sizeof(struct mstat_sniff_rule);
554 
555 bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
556 {
557 	int i;
558 	for (i = 0; i < mstat_sniff_rule_num; i++) {
559 		if (mstat_sniff_rules[i].flags == flags
560 			&& mstat_sniff_rules[i].lb <= size
561 			&& mstat_sniff_rules[i].hb >= size)
562 			return _TRUE;
563 	}
564 
565 	return _FALSE;
566 }
567 
568 inline void *dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
569 {
570 	void *p;
571 
572 	if (match_mstat_sniff_rules(flags, sz))
573 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
574 
575 	p = _rtw_vmalloc((sz));
576 
577 	rtw_mstat_update(
578 		flags
579 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
580 		, sz
581 	);
582 
583 	return p;
584 }
585 
586 inline void *dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
587 {
588 	void *p;
589 
590 	if (match_mstat_sniff_rules(flags, sz))
591 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
592 
593 	p = _rtw_zvmalloc((sz));
594 
595 	rtw_mstat_update(
596 		flags
597 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
598 		, sz
599 	);
600 
601 	return p;
602 }
603 
604 inline void dbg_rtw_vmfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
605 {
606 
607 	if (match_mstat_sniff_rules(flags, sz))
608 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
609 
610 	_rtw_vmfree((pbuf), (sz));
611 
612 	rtw_mstat_update(
613 		flags
614 		, MSTAT_FREE
615 		, sz
616 	);
617 }
618 
619 inline void *dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
620 {
621 	void *p;
622 
623 	if (match_mstat_sniff_rules(flags, sz))
624 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
625 
626 	p = _rtw_malloc((sz));
627 
628 	rtw_mstat_update(
629 		flags
630 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
631 		, sz
632 	);
633 
634 	return p;
635 }
636 
637 inline void *dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
638 {
639 	void *p;
640 
641 	if (match_mstat_sniff_rules(flags, sz))
642 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
643 
644 	p = _rtw_zmalloc((sz));
645 
646 	rtw_mstat_update(
647 		flags
648 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
649 		, sz
650 	);
651 
652 	return p;
653 }
654 
655 inline void dbg_rtw_mfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
656 {
657 	if (match_mstat_sniff_rules(flags, sz))
658 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
659 
660 	_rtw_mfree((pbuf), (sz));
661 
662 	rtw_mstat_update(
663 		flags
664 		, MSTAT_FREE
665 		, sz
666 	);
667 }
668 
669 inline struct sk_buff *dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
670 {
671 	struct sk_buff *skb;
672 	unsigned int truesize = 0;
673 
674 	skb = _rtw_skb_alloc(size);
675 
676 	if (skb)
677 		truesize = skb->truesize;
678 
679 	if (!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
680 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);
681 
682 	rtw_mstat_update(
683 		flags
684 		, skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
685 		, truesize
686 	);
687 
688 	return skb;
689 }
690 
691 inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
692 {
693 	unsigned int truesize = skb->truesize;
694 
695 	if (match_mstat_sniff_rules(flags, truesize))
696 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
697 
698 	_rtw_skb_free(skb);
699 
700 	rtw_mstat_update(
701 		flags
702 		, MSTAT_FREE
703 		, truesize
704 	);
705 }
706 
707 inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
708 {
709 	struct sk_buff *skb_cp;
710 	unsigned int truesize = skb->truesize;
711 	unsigned int cp_truesize = 0;
712 
713 	skb_cp = _rtw_skb_copy(skb);
714 	if (skb_cp)
715 		cp_truesize = skb_cp->truesize;
716 
717 	if (!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
718 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);
719 
720 	rtw_mstat_update(
721 		flags
722 		, skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
723 		, cp_truesize
724 	);
725 
726 	return skb_cp;
727 }
728 
729 inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
730 {
731 	struct sk_buff *skb_cl;
732 	unsigned int truesize = skb->truesize;
733 	unsigned int cl_truesize = 0;
734 
735 	skb_cl = _rtw_skb_clone(skb);
736 	if (skb_cl)
737 		cl_truesize = skb_cl->truesize;
738 
739 	if (!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
740 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);
741 
742 	rtw_mstat_update(
743 		flags
744 		, skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
745 		, cl_truesize
746 	);
747 
748 	return skb_cl;
749 }
750 
751 inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
752 {
753 	int ret;
754 	unsigned int truesize = skb->truesize;
755 
756 	if (match_mstat_sniff_rules(flags, truesize))
757 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
758 
759 	ret = _rtw_netif_rx(ndev, skb);
760 
761 	rtw_mstat_update(
762 		flags
763 		, MSTAT_FREE
764 		, truesize
765 	);
766 
767 	return ret;
768 }
769 
770 #ifdef CONFIG_RTW_NAPI
771 inline int dbg_rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
772 {
773 	int ret;
774 	unsigned int truesize = skb->truesize;
775 
776 	if (match_mstat_sniff_rules(flags, truesize))
777 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
778 
779 	ret = _rtw_netif_receive_skb(ndev, skb);
780 
781 	rtw_mstat_update(
782 		flags
783 		, MSTAT_FREE
784 		, truesize
785 	);
786 
787 	return ret;
788 }
789 
790 #ifdef CONFIG_RTW_GRO
791 inline gro_result_t dbg_rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
792 {
793 	int ret;
794 	unsigned int truesize = skb->truesize;
795 
796 	if (match_mstat_sniff_rules(flags, truesize))
797 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
798 
799 	ret = _rtw_napi_gro_receive(napi, skb);
800 
801 	rtw_mstat_update(
802 		flags
803 		, MSTAT_FREE
804 		, truesize
805 	);
806 
807 	return ret;
808 }
809 #endif /* CONFIG_RTW_GRO */
810 #endif /* CONFIG_RTW_NAPI */
811 
812 inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
813 {
814 	struct sk_buff *skb;
815 
816 	while ((skb = skb_dequeue(list)) != NULL)
817 		dbg_rtw_skb_free(skb, flags, func, line);
818 }
819 
820 #ifdef CONFIG_USB_HCI
821 inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
822 {
823 	void *p;
824 
825 	if (match_mstat_sniff_rules(flags, size))
826 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);
827 
828 	p = _rtw_usb_buffer_alloc(dev, size, dma);
829 
830 	rtw_mstat_update(
831 		flags
832 		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
833 		, size
834 	);
835 
836 	return p;
837 }
838 
839 inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
840 {
841 
842 	if (match_mstat_sniff_rules(flags, size))
843 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);
844 
845 	_rtw_usb_buffer_free(dev, size, addr, dma);
846 
847 	rtw_mstat_update(
848 		flags
849 		, MSTAT_FREE
850 		, size
851 	);
852 }
853 #endif /* CONFIG_USB_HCI */
854 
855 #endif /* defined(DBG_MEM_ALLOC) */
856 
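/*
 * Allocate an h x w two-dimensional array in a single block: a table of h row
 * pointers placed in front of the h * w * size data area. Release it with
 * rtw_mfree2d().
 */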
857 void *rtw_malloc2d(int h, int w, size_t size)
858 {
859 	int j;
860 
861 	void **a = (void **) rtw_zmalloc(h * sizeof(void *) + h * w * size);
862 	if (a == NULL) {
863 		RTW_INFO("%s: alloc memory fail!\n", __FUNCTION__);
864 		return NULL;
865 	}
866 
867 	for (j = 0; j < h; j++)
868 		a[j] = ((char *)(a + h)) + j * w * size;
869 
870 	return a;
871 }
872 
873 void rtw_mfree2d(void *pbuf, int h, int w, int size)
874 {
875 	rtw_mfree((u8 *)pbuf, h * sizeof(void *) + w * h * size);
876 }
877 
878 inline void rtw_os_pkt_free(_pkt *pkt)
879 {
880 #if defined(PLATFORM_LINUX)
881 	rtw_skb_free(pkt);
882 #elif defined(PLATFORM_FREEBSD)
883 	m_freem(pkt);
884 #else
885 	#error "TBD\n"
886 #endif
887 }
888 
889 inline _pkt *rtw_os_pkt_copy(_pkt *pkt)
890 {
891 #if defined(PLATFORM_LINUX)
892 	return rtw_skb_copy(pkt);
893 #elif defined(PLATFORM_FREEBSD)
894 	return m_dup(pkt, M_NOWAIT);
895 #else
896 	#error "TBD\n"
897 #endif
898 }
899 
900 inline void *rtw_os_pkt_data(_pkt *pkt)
901 {
902 #if defined(PLATFORM_LINUX)
903 	return pkt->data;
904 #elif defined(PLATFORM_FREEBSD)
905 	return pkt->m_data;
906 #else
907 	#error "TBD\n"
908 #endif
909 }
910 
911 inline u32 rtw_os_pkt_len(_pkt *pkt)
912 {
913 #if defined(PLATFORM_LINUX)
914 	return pkt->len;
915 #elif defined(PLATFORM_FREEBSD)
916 	return pkt->m_pkthdr.len;
917 #else
918 	#error "TBD\n"
919 #endif
920 }
921 
922 void _rtw_memcpy(void *dst, const void *src, u32 sz)
923 {
924 
925 #if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
926 
927 	memcpy(dst, src, sz);
928 
929 #endif
930 
931 #ifdef PLATFORM_WINDOWS
932 
933 	NdisMoveMemory(dst, src, sz);
934 
935 #endif
936 
937 }
938 
939 inline void _rtw_memmove(void *dst, const void *src, u32 sz)
940 {
941 #if defined(PLATFORM_LINUX)
942 	memmove(dst, src, sz);
943 #else
944 	#error "TBD\n"
945 #endif
946 }
947 
948 int	_rtw_memcmp(const void *dst, const void *src, u32 sz)
949 {
950 
951 #if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
952 	/* under Linux/GNU/glibc, memcmp() returns 0 for two identical memory chunks */
953 
954 	if (!(memcmp(dst, src, sz)))
955 		return _TRUE;
956 	else
957 		return _FALSE;
958 #endif
959 
960 
961 #ifdef PLATFORM_WINDOWS
962 	/* under Windows, NdisEqualMemory() returns 1 for two identical memory chunks */
963 
964 	if (NdisEqualMemory(dst, src, sz))
965 		return _TRUE;
966 	else
967 		return _FALSE;
968 
969 #endif
970 
971 
972 
973 }
974 
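/*
 * memcmp-style compare: returns 0 when the buffers match, otherwise the
 * difference of the first differing bytes (unlike _rtw_memcmp, which returns
 * _TRUE/_FALSE).
 */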
975 int _rtw_memcmp2(const void *dst, const void *src, u32 sz)
976 {
977 	const unsigned char *p1 = dst, *p2 = src;
978 
979 	if (sz == 0)
980 		return 0;
981 
982 	while (*p1 == *p2) {
983 		p1++;
984 		p2++;
985 		sz--;
986 		if (sz == 0)
987 			return 0;
988 	}
989 
990 	return *p1 - *p2;
991 }
992 
993 void _rtw_memset(void *pbuf, int c, u32 sz)
994 {
995 
996 #if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
997 
998 	memset(pbuf, c, sz);
999 
1000 #endif
1001 
1002 #ifdef PLATFORM_WINDOWS
1003 #if 0
1004 	NdisZeroMemory(pbuf, sz);
1005 	if (c != 0)
1006 		memset(pbuf, c, sz);
1007 #else
1008 	NdisFillMemory(pbuf, sz, c);
1009 #endif
1010 #endif
1011 
1012 }
1013 
1014 #ifdef PLATFORM_FREEBSD
1015 static inline void __list_add(_list *pnew, _list *pprev, _list *pnext)
1016 {
1017 	pnext->prev = pnew;
1018 	pnew->next = pnext;
1019 	pnew->prev = pprev;
1020 	pprev->next = pnew;
1021 }
1022 #endif /* PLATFORM_FREEBSD */
1023 
1024 
1025 void _rtw_init_listhead(_list *list)
1026 {
1027 
1028 #ifdef PLATFORM_LINUX
1029 
1030 	INIT_LIST_HEAD(list);
1031 
1032 #endif
1033 
1034 #ifdef PLATFORM_FREEBSD
1035 	list->next = list;
1036 	list->prev = list;
1037 #endif
1038 #ifdef PLATFORM_WINDOWS
1039 
1040 	NdisInitializeListHead(list);
1041 
1042 #endif
1043 
1044 }
1045 
1046 
1047 /*
1048 For the following list_xxx operations,
1049 caller must guarantee the atomic context.
1050 Otherwise, there will be a race condition.
1051 */
1052 u32	rtw_is_list_empty(_list *phead)
1053 {
1054 
1055 #ifdef PLATFORM_LINUX
1056 
1057 	if (list_empty(phead))
1058 		return _TRUE;
1059 	else
1060 		return _FALSE;
1061 
1062 #endif
1063 #ifdef PLATFORM_FREEBSD
1064 
1065 	if (phead->next == phead)
1066 		return _TRUE;
1067 	else
1068 		return _FALSE;
1069 
1070 #endif
1071 
1072 
1073 #ifdef PLATFORM_WINDOWS
1074 
1075 	if (IsListEmpty(phead))
1076 		return _TRUE;
1077 	else
1078 		return _FALSE;
1079 
1080 #endif
1081 
1082 
1083 }
1084 
1085 void rtw_list_insert_head(_list *plist, _list *phead)
1086 {
1087 
1088 #ifdef PLATFORM_LINUX
1089 	list_add(plist, phead);
1090 #endif
1091 
1092 #ifdef PLATFORM_FREEBSD
1093 	__list_add(plist, phead, phead->next);
1094 #endif
1095 
1096 #ifdef PLATFORM_WINDOWS
1097 	InsertHeadList(phead, plist);
1098 #endif
1099 }
1100 
1101 void rtw_list_insert_tail(_list *plist, _list *phead)
1102 {
1103 
1104 #ifdef PLATFORM_LINUX
1105 
1106 	list_add_tail(plist, phead);
1107 
1108 #endif
1109 #ifdef PLATFORM_FREEBSD
1110 
1111 	__list_add(plist, phead->prev, phead);
1112 
1113 #endif
1114 #ifdef PLATFORM_WINDOWS
1115 
1116 	InsertTailList(phead, plist);
1117 
1118 #endif
1119 
1120 }
1121 
1122 inline void rtw_list_splice(_list *list, _list *head)
1123 {
1124 #ifdef PLATFORM_LINUX
1125 	list_splice(list, head);
1126 #else
1127 	#error "TBD\n"
1128 #endif
1129 }
1130 
1131 inline void rtw_list_splice_init(_list *list, _list *head)
1132 {
1133 #ifdef PLATFORM_LINUX
1134 	list_splice_init(list, head);
1135 #else
1136 	#error "TBD\n"
1137 #endif
1138 }
1139 
1140 inline void rtw_list_splice_tail(_list *list, _list *head)
1141 {
1142 #ifdef PLATFORM_LINUX
1143 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
1144 	if (!list_empty(list))
1145 		__list_splice(list, head);
1146 	#else
1147 	list_splice_tail(list, head);
1148 	#endif
1149 #else
1150 	#error "TBD\n"
1151 #endif
1152 }
1153 
1154 inline void rtw_hlist_head_init(rtw_hlist_head *h)
1155 {
1156 #ifdef PLATFORM_LINUX
1157 	INIT_HLIST_HEAD(h);
1158 #else
1159 	#error "TBD\n"
1160 #endif
1161 }
1162 
1163 inline void rtw_hlist_add_head(rtw_hlist_node *n, rtw_hlist_head *h)
1164 {
1165 #ifdef PLATFORM_LINUX
1166 	hlist_add_head(n, h);
1167 #else
1168 	#error "TBD\n"
1169 #endif
1170 }
1171 
1172 inline void rtw_hlist_del(rtw_hlist_node *n)
1173 {
1174 #ifdef PLATFORM_LINUX
1175 	hlist_del(n);
1176 #else
1177 	#error "TBD\n"
1178 #endif
1179 }
1180 
1181 inline void rtw_hlist_add_head_rcu(rtw_hlist_node *n, rtw_hlist_head *h)
1182 {
1183 #ifdef PLATFORM_LINUX
1184 	hlist_add_head_rcu(n, h);
1185 #else
1186 	#error "TBD\n"
1187 #endif
1188 }
1189 
1190 inline void rtw_hlist_del_rcu(rtw_hlist_node *n)
1191 {
1192 #ifdef PLATFORM_LINUX
1193 	hlist_del_rcu(n);
1194 #else
1195 	#error "TBD\n"
1196 #endif
1197 }
1198 
1199 void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc, void *ctx)
1200 {
1201 	_adapter *adapter = (_adapter *)padapter;
1202 
1203 #ifdef PLATFORM_LINUX
1204 	_init_timer(ptimer, adapter->pnetdev, pfunc, ctx);
1205 #endif
1206 #ifdef PLATFORM_FREEBSD
1207 	_init_timer(ptimer, adapter->pifp, pfunc, ctx);
1208 #endif
1209 #ifdef PLATFORM_WINDOWS
1210 	_init_timer(ptimer, adapter->hndis_adapter, pfunc, ctx);
1211 #endif
1212 }
1213 
1214 /*
1215 
1216 Caller must check if the list is empty before calling rtw_list_delete
1217 
1218 */
1219 
1220 
1221 void _rtw_init_sema(_sema	*sema, int init_val)
1222 {
1223 
1224 #ifdef PLATFORM_LINUX
1225 
1226 	sema_init(sema, init_val);
1227 
1228 #endif
1229 #ifdef PLATFORM_FREEBSD
1230 	sema_init(sema, init_val, "rtw_drv");
1231 #endif
1232 #ifdef PLATFORM_OS_XP
1233 
1234 	KeInitializeSemaphore(sema, init_val,  SEMA_UPBND); /* count=0; */
1235 
1236 #endif
1237 
1238 #ifdef PLATFORM_OS_CE
1239 	if (*sema == NULL)
1240 		*sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL);
1241 #endif
1242 
1243 }
1244 
1245 void _rtw_free_sema(_sema	*sema)
1246 {
1247 #ifdef PLATFORM_FREEBSD
1248 	sema_destroy(sema);
1249 #endif
1250 #ifdef PLATFORM_OS_CE
1251 	CloseHandle(*sema);
1252 #endif
1253 
1254 }
1255 
1256 void _rtw_up_sema(_sema	*sema)
1257 {
1258 
1259 #ifdef PLATFORM_LINUX
1260 
1261 	up(sema);
1262 
1263 #endif
1264 #ifdef PLATFORM_FREEBSD
1265 	sema_post(sema);
1266 #endif
1267 #ifdef PLATFORM_OS_XP
1268 
1269 	KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1,  FALSE);
1270 
1271 #endif
1272 
1273 #ifdef PLATFORM_OS_CE
1274 	ReleaseSemaphore(*sema,  1,  NULL);
1275 #endif
1276 }
1277 
1278 u32 _rtw_down_sema(_sema *sema)
1279 {
1280 
1281 #ifdef PLATFORM_LINUX
1282 
1283 	if (down_interruptible(sema))
1284 		return _FAIL;
1285 	else
1286 		return _SUCCESS;
1287 
1288 #endif
1289 #ifdef PLATFORM_FREEBSD
1290 	sema_wait(sema);
1291 	return  _SUCCESS;
1292 #endif
1293 #ifdef PLATFORM_OS_XP
1294 
1295 	if (STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL))
1296 		return  _SUCCESS;
1297 	else
1298 		return _FAIL;
1299 #endif
1300 
1301 #ifdef PLATFORM_OS_CE
1302 	if (WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE))
1303 		return _SUCCESS;
1304 	else
1305 		return _FAIL;
1306 #endif
1307 }
1308 
1309 inline void thread_exit(_completion *comp)
1310 {
1311 #ifdef PLATFORM_LINUX
1312 	complete_and_exit(comp, 0);
1313 #endif
1314 
1315 #ifdef PLATFORM_FREEBSD
1316 	printf("%s", "RTKTHREAD_exit");
1317 #endif
1318 
1319 #ifdef PLATFORM_OS_CE
1320 	ExitThread(STATUS_SUCCESS);
1321 #endif
1322 
1323 #ifdef PLATFORM_OS_XP
1324 	PsTerminateSystemThread(STATUS_SUCCESS);
1325 #endif
1326 }
1327 
1328 inline void _rtw_init_completion(_completion *comp)
1329 {
1330 #ifdef PLATFORM_LINUX
1331 	init_completion(comp);
1332 #endif
1333 }
1334 inline void _rtw_wait_for_comp_timeout(_completion *comp)
1335 {
1336 #ifdef PLATFORM_LINUX
1337 	wait_for_completion_timeout(comp, msecs_to_jiffies(3000));
1338 #endif
1339 }
1340 inline void _rtw_wait_for_comp(_completion *comp)
1341 {
1342 #ifdef PLATFORM_LINUX
1343 	wait_for_completion(comp);
1344 #endif
1345 }
1346 
1347 void	_rtw_mutex_init(_mutex *pmutex)
1348 {
1349 #ifdef PLATFORM_LINUX
1350 
1351 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
1352 	mutex_init(pmutex);
1353 #else
1354 	init_MUTEX(pmutex);
1355 #endif
1356 
1357 #endif
1358 #ifdef PLATFORM_FREEBSD
1359 	mtx_init(pmutex, "", NULL, MTX_DEF | MTX_RECURSE);
1360 #endif
1361 #ifdef PLATFORM_OS_XP
1362 
1363 	KeInitializeMutex(pmutex, 0);
1364 
1365 #endif
1366 
1367 #ifdef PLATFORM_OS_CE
1368 	*pmutex =  CreateMutex(NULL, _FALSE, NULL);
1369 #endif
1370 }
1371 
1372 void	_rtw_mutex_free(_mutex *pmutex);
1373 void	_rtw_mutex_free(_mutex *pmutex)
1374 {
1375 #ifdef PLATFORM_LINUX
1376 
1377 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
1378 	mutex_destroy(pmutex);
1379 #else
1380 #endif
1381 
1382 #ifdef PLATFORM_FREEBSD
1383 	sema_destroy(pmutex);
1384 #endif
1385 
1386 #endif
1387 
1388 #ifdef PLATFORM_OS_XP
1389 
1390 #endif
1391 
1392 #ifdef PLATFORM_OS_CE
1393 
1394 #endif
1395 }
1396 
1397 void	_rtw_spinlock_init(_lock *plock)
1398 {
1399 
1400 #ifdef PLATFORM_LINUX
1401 
1402 	spin_lock_init(plock);
1403 
1404 #endif
1405 #ifdef PLATFORM_FREEBSD
1406 	mtx_init(plock, "", NULL, MTX_DEF | MTX_RECURSE);
1407 #endif
1408 #ifdef PLATFORM_WINDOWS
1409 
1410 	NdisAllocateSpinLock(plock);
1411 
1412 #endif
1413 
1414 }
1415 
1416 void	_rtw_spinlock_free(_lock *plock)
1417 {
1418 #ifdef PLATFORM_FREEBSD
1419 	mtx_destroy(plock);
1420 #endif
1421 
1422 #ifdef PLATFORM_WINDOWS
1423 
1424 	NdisFreeSpinLock(plock);
1425 
1426 #endif
1427 
1428 }
1429 #ifdef PLATFORM_FREEBSD
1430 extern PADAPTER prtw_lock;
1431 
1432 void rtw_mtx_lock(_lock *plock)
1433 {
1434 	if (prtw_lock)
1435 		mtx_lock(&prtw_lock->glock);
1436 	else
1437 		printf("%s prtw_lock==NULL", __FUNCTION__);
1438 }
1439 void rtw_mtx_unlock(_lock *plock)
1440 {
1441 	if (prtw_lock)
1442 		mtx_unlock(&prtw_lock->glock);
1443 	else
1444 		printf("%s prtw_lock==NULL", __FUNCTION__);
1445 
1446 }
1447 #endif /* PLATFORM_FREEBSD */
1448 
1449 
1450 void	_rtw_spinlock(_lock	*plock)
1451 {
1452 
1453 #ifdef PLATFORM_LINUX
1454 
1455 	spin_lock(plock);
1456 
1457 #endif
1458 #ifdef PLATFORM_FREEBSD
1459 	mtx_lock(plock);
1460 #endif
1461 #ifdef PLATFORM_WINDOWS
1462 
1463 	NdisAcquireSpinLock(plock);
1464 
1465 #endif
1466 
1467 }
1468 
1469 void	_rtw_spinunlock(_lock *plock)
1470 {
1471 
1472 #ifdef PLATFORM_LINUX
1473 
1474 	spin_unlock(plock);
1475 
1476 #endif
1477 #ifdef PLATFORM_FREEBSD
1478 	mtx_unlock(plock);
1479 #endif
1480 #ifdef PLATFORM_WINDOWS
1481 
1482 	NdisReleaseSpinLock(plock);
1483 
1484 #endif
1485 }
1486 
1487 
1488 void	_rtw_spinlock_ex(_lock	*plock)
1489 {
1490 
1491 #ifdef PLATFORM_LINUX
1492 
1493 	spin_lock(plock);
1494 
1495 #endif
1496 #ifdef PLATFORM_FREEBSD
1497 	mtx_lock(plock);
1498 #endif
1499 #ifdef PLATFORM_WINDOWS
1500 
1501 	NdisDprAcquireSpinLock(plock);
1502 
1503 #endif
1504 
1505 }
1506 
1507 void	_rtw_spinunlock_ex(_lock *plock)
1508 {
1509 
1510 #ifdef PLATFORM_LINUX
1511 
1512 	spin_unlock(plock);
1513 
1514 #endif
1515 #ifdef PLATFORM_FREEBSD
1516 	mtx_unlock(plock);
1517 #endif
1518 #ifdef PLATFORM_WINDOWS
1519 
1520 	NdisDprReleaseSpinLock(plock);
1521 
1522 #endif
1523 }
1524 
1525 
1526 
1527 void _rtw_init_queue(_queue *pqueue)
1528 {
1529 	_rtw_init_listhead(&(pqueue->queue));
1530 	_rtw_spinlock_init(&(pqueue->lock));
1531 }
1532 
1533 void _rtw_deinit_queue(_queue *pqueue)
1534 {
1535 	_rtw_spinlock_free(&(pqueue->lock));
1536 }
1537 
1538 u32	_rtw_queue_empty(_queue	*pqueue)
1539 {
1540 	return rtw_is_list_empty(&(pqueue->queue));
1541 }
1542 
1543 
1544 u32 rtw_end_of_queue_search(_list *head, _list *plist)
1545 {
1546 	if (head == plist)
1547 		return _TRUE;
1548 	else
1549 		return _FALSE;
1550 }
1551 
1552 
1553 systime _rtw_get_current_time(void)
1554 {
1555 
1556 #ifdef PLATFORM_LINUX
1557 	return jiffies;
1558 #endif
1559 #ifdef PLATFORM_FREEBSD
1560 	struct timeval tvp;
1561 	getmicrotime(&tvp);
1562 	return tvp.tv_sec;
1563 #endif
1564 #ifdef PLATFORM_WINDOWS
1565 	LARGE_INTEGER	SystemTime;
1566 	NdisGetCurrentSystemTime(&SystemTime);
1567 	return SystemTime.LowPart;/* count of 100-nanosecond intervals */
1568 #endif
1569 }
1570 
1571 inline u32 _rtw_systime_to_ms(systime stime)
1572 {
1573 #ifdef PLATFORM_LINUX
1574 	return jiffies_to_msecs(stime);
1575 #endif
1576 #ifdef PLATFORM_FREEBSD
1577 	return stime * 1000;
1578 #endif
1579 #ifdef PLATFORM_WINDOWS
1580 	return stime / 10000 ;
1581 #endif
1582 }
1583 
1584 inline systime _rtw_ms_to_systime(u32 ms)
1585 {
1586 #ifdef PLATFORM_LINUX
1587 	return msecs_to_jiffies(ms);
1588 #endif
1589 #ifdef PLATFORM_FREEBSD
1590 	return ms / 1000;
1591 #endif
1592 #ifdef PLATFORM_WINDOWS
1593 	return ms * 10000 ;
1594 #endif
1595 }
1596 
1597 inline systime _rtw_us_to_systime(u32 us)
1598 {
1599 #ifdef PLATFORM_LINUX
1600 	return usecs_to_jiffies(us);
1601 #else
1602 	#error "TBD\n"
1603 #endif
1604 }
1605 
1606 /* the input parameter start uses the same unit as the value returned by rtw_get_current_time */
1607 inline s32 _rtw_get_passing_time_ms(systime start)
1608 {
1609 	return _rtw_systime_to_ms(_rtw_get_current_time() - start);
1610 }
1611 
1612 inline s32 _rtw_get_remaining_time_ms(systime end)
1613 {
1614 	return _rtw_systime_to_ms(end - _rtw_get_current_time());
1615 }
1616 
1617 inline s32 _rtw_get_time_interval_ms(systime start, systime end)
1618 {
1619 	return _rtw_systime_to_ms(end - start);
1620 }
1621 
1622 inline bool _rtw_time_after(systime a, systime b)
1623 {
1624 #ifdef PLATFORM_LINUX
1625 	return time_after(a, b);
1626 #else
1627 	#error "TBD\n"
1628 #endif
1629 }
1630 
1631 void rtw_sleep_schedulable(int ms)
1632 {
1633 
1634 #ifdef PLATFORM_LINUX
1635 
1636 	u32 delta;
1637 
1638 	delta = (ms * HZ) / 1000; /* convert ms to jiffies */
1639 	if (delta == 0) {
1640 		delta = 1; /* at least 1 jiffy */
1641 	}
1642 	set_current_state(TASK_INTERRUPTIBLE);
1643 	schedule_timeout(delta);
1644 	return;
1645 
1646 #endif
1647 #ifdef PLATFORM_FREEBSD
1648 	DELAY(ms * 1000);
1649 	return ;
1650 #endif
1651 
1652 #ifdef PLATFORM_WINDOWS
1653 
1654 	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */
1655 
1656 #endif
1657 
1658 }
1659 
1660 
1661 void rtw_msleep_os(int ms)
1662 {
1663 
1664 #ifdef PLATFORM_LINUX
1665 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
1666 	if (ms < 20) {
1667 		unsigned long us = ms * 1000UL;
1668 		usleep_range(us, us + 1000UL);
1669 	} else
1670 #endif
1671 		msleep((unsigned int)ms);
1672 
1673 #endif
1674 #ifdef PLATFORM_FREEBSD
1675 	/* Delay for delay microseconds */
1676 	DELAY(ms * 1000);
1677 	return ;
1678 #endif
1679 #ifdef PLATFORM_WINDOWS
1680 
1681 	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */
1682 
1683 #endif
1684 
1685 
1686 }
1687 void rtw_usleep_os(int us)
1688 {
1689 #ifdef PLATFORM_LINUX
1690 
1691 	/* msleep((unsigned int)us); */
1692 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
1693 	usleep_range(us, us + 1);
1694 #else
1695 	if (1 < (us / 1000))
1696 		msleep(1);
1697 	else
1698 		msleep((us / 1000) + 1);
1699 #endif
1700 #endif
1701 
1702 #ifdef PLATFORM_FREEBSD
1703 	/* Delay for delay microseconds */
1704 	DELAY(us);
1705 
1706 	return ;
1707 #endif
1708 #ifdef PLATFORM_WINDOWS
1709 
1710 	NdisMSleep(us); /* (us) */
1711 
1712 #endif
1713 
1714 
1715 }
1716 
1717 
1718 #ifdef DBG_DELAY_OS
1719 void _rtw_mdelay_os(int ms, const char *func, const int line)
1720 {
1721 #if 0
1722 	if (ms > 10)
1723 		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
1724 	rtw_msleep_os(ms);
1725 	return;
1726 #endif
1727 
1728 
1729 	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
1730 
1731 #if defined(PLATFORM_LINUX)
1732 
1733 	mdelay((unsigned long)ms);
1734 
1735 #elif defined(PLATFORM_WINDOWS)
1736 
1737 	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */
1738 
1739 #endif
1740 
1741 
1742 }
1743 void _rtw_udelay_os(int us, const char *func, const int line)
1744 {
1745 
1746 #if 0
1747 	if (us > 1000) {
1748 		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
1749 		rtw_usleep_os(us);
1750 		return;
1751 	}
1752 #endif
1753 
1754 
1755 	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
1756 
1757 
1758 #if defined(PLATFORM_LINUX)
1759 
1760 	udelay((unsigned long)us);
1761 
1762 #elif defined(PLATFORM_WINDOWS)
1763 
1764 	NdisStallExecution(us); /* (us) */
1765 
1766 #endif
1767 
1768 }
1769 #else
1770 void rtw_mdelay_os(int ms)
1771 {
1772 
1773 #ifdef PLATFORM_LINUX
1774 
1775 	mdelay((unsigned long)ms);
1776 
1777 #endif
1778 #ifdef PLATFORM_FREEBSD
1779 	DELAY(ms * 1000);
1780 	return ;
1781 #endif
1782 #ifdef PLATFORM_WINDOWS
1783 
1784 	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */
1785 
1786 #endif
1787 
1788 
1789 }
1790 void rtw_udelay_os(int us)
1791 {
1792 
1793 #ifdef PLATFORM_LINUX
1794 
1795 	udelay((unsigned long)us);
1796 
1797 #endif
1798 #ifdef PLATFORM_FREEBSD
1799 	/* Delay for delay microseconds */
1800 	DELAY(us);
1801 	return ;
1802 #endif
1803 #ifdef PLATFORM_WINDOWS
1804 
1805 	NdisStallExecution(us); /* (us) */
1806 
1807 #endif
1808 
1809 }
1810 #endif
1811 
1812 void rtw_yield_os(void)
1813 {
1814 #ifdef PLATFORM_LINUX
1815 	yield();
1816 #endif
1817 #ifdef PLATFORM_FREEBSD
1818 	yield();
1819 #endif
1820 #ifdef PLATFORM_WINDOWS
1821 	SwitchToThread();
1822 #endif
1823 }
1824 
1825 const char *_rtw_pwait_type_str[] = {
1826 	[RTW_PWAIT_TYPE_MSLEEP] = "MS",
1827 	[RTW_PWAIT_TYPE_USLEEP] = "US",
1828 	[RTW_PWAIT_TYPE_YIELD] = "Y",
1829 	[RTW_PWAIT_TYPE_MDELAY] = "MD",
1830 	[RTW_PWAIT_TYPE_UDELAY] = "UD",
1831 	[RTW_PWAIT_TYPE_NUM] = "unknown",
1832 };
1833 
1834 static void rtw_pwctx_yield(int us)
1835 {
1836 	rtw_yield_os();
1837 }
1838 
1839 static void (*const rtw_pwait_hdl[])(int) = {
1840 	[RTW_PWAIT_TYPE_MSLEEP] = rtw_msleep_os,
1841 	[RTW_PWAIT_TYPE_USLEEP] = rtw_usleep_os,
1842 	[RTW_PWAIT_TYPE_YIELD] = rtw_pwctx_yield,
1843 	[RTW_PWAIT_TYPE_MDELAY] = rtw_mdelay_os,
1844 	[RTW_PWAIT_TYPE_UDELAY] = rtw_udelay_os,
1845 };
1846 
1847 int rtw_pwctx_config(struct rtw_pwait_ctx *pwctx, enum rtw_pwait_type type, s32 time, s32 cnt_lmt)
1848 {
1849 	int ret = _FAIL;
1850 
1851 	if (!RTW_PWAIT_TYPE_VALID(type))
1852 		goto exit;
1853 
1854 	pwctx->conf.type = type;
1855 	pwctx->conf.wait_time = time;
1856 	pwctx->conf.wait_cnt_lmt = cnt_lmt;
1857 	pwctx->wait_hdl = rtw_pwait_hdl[type];
1858 
1859 	ret = _SUCCESS;
1860 
1861 exit:
1862 	return ret;
1863 }
1864 
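/*
 * Compare two 6-byte MAC addresses as big-endian values: the first four bytes
 * decide, the last two break ties. Returns true when @a is larger than @b.
 */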
1865 bool rtw_macaddr_is_larger(const u8 *a, const u8 *b)
1866 {
1867 	u32 va, vb;
1868 
1869 	va = be32_to_cpu(*((u32 *)a));
1870 	vb = be32_to_cpu(*((u32 *)b));
1871 	if (va > vb)
1872 		return 1;
1873 	else if (va < vb)
1874 		return 0;
1875 
1876 	return be16_to_cpu(*((u16 *)(a + 4))) > be16_to_cpu(*((u16 *)(b + 4)));
1877 }
1878 
1879 #define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
1880 #define RTW_SUSPEND_TRAFFIC_LOCK_NAME "rtw_wifi_traffic"
1881 #define RTW_SUSPEND_RESUME_LOCK_NAME "rtw_wifi_resume"
1882 #ifdef CONFIG_WAKELOCK
1883 static struct wake_lock rtw_suspend_lock;
1884 static struct wake_lock rtw_suspend_traffic_lock;
1885 static struct wake_lock rtw_suspend_resume_lock;
1886 #elif defined(CONFIG_ANDROID_POWER)
1887 static android_suspend_lock_t rtw_suspend_lock = {
1888 	.name = RTW_SUSPEND_LOCK_NAME
1889 };
1890 static android_suspend_lock_t rtw_suspend_traffic_lock = {
1891 	.name = RTW_SUSPEND_TRAFFIC_LOCK_NAME
1892 };
1893 static android_suspend_lock_t rtw_suspend_resume_lock = {
1894 	.name = RTW_SUSPEND_RESUME_LOCK_NAME
1895 };
1896 #endif
1897 
1898 inline void rtw_suspend_lock_init(void)
1899 {
1900 #ifdef CONFIG_WAKELOCK
1901 	wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME);
1902 	wake_lock_init(&rtw_suspend_traffic_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_TRAFFIC_LOCK_NAME);
1903 	wake_lock_init(&rtw_suspend_resume_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RESUME_LOCK_NAME);
1904 #elif defined(CONFIG_ANDROID_POWER)
1905 	android_init_suspend_lock(&rtw_suspend_lock);
1906 	android_init_suspend_lock(&rtw_suspend_traffic_lock);
1907 	android_init_suspend_lock(&rtw_suspend_resume_lock);
1908 #endif
1909 }
1910 
1911 inline void rtw_suspend_lock_uninit(void)
1912 {
1913 #ifdef CONFIG_WAKELOCK
1914 	wake_lock_destroy(&rtw_suspend_lock);
1915 	wake_lock_destroy(&rtw_suspend_traffic_lock);
1916 	wake_lock_destroy(&rtw_suspend_resume_lock);
1917 #elif defined(CONFIG_ANDROID_POWER)
1918 	android_uninit_suspend_lock(&rtw_suspend_lock);
1919 	android_uninit_suspend_lock(&rtw_suspend_traffic_lock);
1920 	android_uninit_suspend_lock(&rtw_suspend_resume_lock);
1921 #endif
1922 }
1923 
1924 inline void rtw_lock_suspend(void)
1925 {
1926 #ifdef CONFIG_WAKELOCK
1927 	wake_lock(&rtw_suspend_lock);
1928 #elif defined(CONFIG_ANDROID_POWER)
1929 	android_lock_suspend(&rtw_suspend_lock);
1930 #endif
1931 
1932 #if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1933 	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
1934 #endif
1935 }
1936 
1937 inline void rtw_unlock_suspend(void)
1938 {
1939 #ifdef CONFIG_WAKELOCK
1940 	wake_unlock(&rtw_suspend_lock);
1941 #elif defined(CONFIG_ANDROID_POWER)
1942 	android_unlock_suspend(&rtw_suspend_lock);
1943 #endif
1944 
1945 #if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1946 	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
1947 #endif
1948 }
1949 
1950 inline void rtw_resume_lock_suspend(void)
1951 {
1952 #ifdef CONFIG_WAKELOCK
1953 	wake_lock(&rtw_suspend_resume_lock);
1954 #elif defined(CONFIG_ANDROID_POWER)
1955 	android_lock_suspend(&rtw_suspend_resume_lock);
1956 #endif
1957 
1958 #if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1959 	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
1960 #endif
1961 }
1962 
1963 inline void rtw_resume_unlock_suspend(void)
1964 {
1965 #ifdef CONFIG_WAKELOCK
1966 	wake_unlock(&rtw_suspend_resume_lock);
1967 #elif defined(CONFIG_ANDROID_POWER)
1968 	android_unlock_suspend(&rtw_suspend_resume_lock);
1969 #endif
1970 
1971 #if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
1972 	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
1973 #endif
1974 }
1975 
1976 inline void rtw_lock_suspend_timeout(u32 timeout_ms)
1977 {
1978 #ifdef CONFIG_WAKELOCK
1979 	wake_lock_timeout(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
1980 #elif defined(CONFIG_ANDROID_POWER)
1981 	android_lock_suspend_auto_expire(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
1982 #endif
1983 }
1984 
1985 
1986 inline void rtw_lock_traffic_suspend_timeout(u32 timeout_ms)
1987 {
1988 #ifdef CONFIG_WAKELOCK
1989 	wake_lock_timeout(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
1990 #elif defined(CONFIG_ANDROID_POWER)
1991 	android_lock_suspend_auto_expire(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
1992 #endif
1993 	/* RTW_INFO("traffic lock timeout:%d\n", timeout_ms); */
1994 }
1995 
1996 inline void rtw_set_bit(int nr, unsigned long *addr)
1997 {
1998 #ifdef PLATFORM_LINUX
1999 	set_bit(nr, addr);
2000 #else
2001 	#error "TBD\n";
2002 #endif
2003 }
2004 
2005 inline void rtw_clear_bit(int nr, unsigned long *addr)
2006 {
2007 #ifdef PLATFORM_LINUX
2008 	clear_bit(nr, addr);
2009 #else
2010 	#error "TBD\n";
2011 #endif
2012 }
2013 
2014 inline int rtw_test_and_clear_bit(int nr, unsigned long *addr)
2015 {
2016 #ifdef PLATFORM_LINUX
2017 	return test_and_clear_bit(nr, addr);
2018 #else
2019 	#error "TBD\n";
2020 #endif
2021 }
2022 
2023 inline void ATOMIC_SET(ATOMIC_T *v, int i)
2024 {
2025 #ifdef PLATFORM_LINUX
2026 	atomic_set(v, i);
2027 #elif defined(PLATFORM_WINDOWS)
2028 	*v = i; /* other choice???? */
2029 #elif defined(PLATFORM_FREEBSD)
2030 	atomic_set_int(v, i);
2031 #endif
2032 }
2033 
2034 inline int ATOMIC_READ(ATOMIC_T *v)
2035 {
2036 #ifdef PLATFORM_LINUX
2037 	return atomic_read(v);
2038 #elif defined(PLATFORM_WINDOWS)
2039 	return *v; /* other choice???? */
2040 #elif defined(PLATFORM_FREEBSD)
2041 	return atomic_load_acq_32(v);
2042 #endif
2043 }
2044 
2045 inline void ATOMIC_ADD(ATOMIC_T *v, int i)
2046 {
2047 #ifdef PLATFORM_LINUX
2048 	atomic_add(i, v);
2049 #elif defined(PLATFORM_WINDOWS)
2050 	InterlockedAdd(v, i);
2051 #elif defined(PLATFORM_FREEBSD)
2052 	atomic_add_int(v, i);
2053 #endif
2054 }
2055 inline void ATOMIC_SUB(ATOMIC_T *v, int i)
2056 {
2057 #ifdef PLATFORM_LINUX
2058 	atomic_sub(i, v);
2059 #elif defined(PLATFORM_WINDOWS)
2060 	InterlockedAdd(v, -i);
2061 #elif defined(PLATFORM_FREEBSD)
2062 	atomic_subtract_int(v, i);
2063 #endif
2064 }
2065 
2066 inline void ATOMIC_INC(ATOMIC_T *v)
2067 {
2068 #ifdef PLATFORM_LINUX
2069 	atomic_inc(v);
2070 #elif defined(PLATFORM_WINDOWS)
2071 	InterlockedIncrement(v);
2072 #elif defined(PLATFORM_FREEBSD)
2073 	atomic_add_int(v, 1);
2074 #endif
2075 }
2076 
2077 inline void ATOMIC_DEC(ATOMIC_T *v)
2078 {
2079 #ifdef PLATFORM_LINUX
2080 	atomic_dec(v);
2081 #elif defined(PLATFORM_WINDOWS)
2082 	InterlockedDecrement(v);
2083 #elif defined(PLATFORM_FREEBSD)
2084 	atomic_subtract_int(v, 1);
2085 #endif
2086 }
2087 
2088 inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
2089 {
2090 #ifdef PLATFORM_LINUX
2091 	return atomic_add_return(i, v);
2092 #elif defined(PLATFORM_WINDOWS)
2093 	return InterlockedAdd(v, i);
2094 #elif defined(PLATFORM_FREEBSD)
2095 	atomic_add_int(v, i);
2096 	return atomic_load_acq_32(v);
2097 #endif
2098 }
2099 
2100 inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
2101 {
2102 #ifdef PLATFORM_LINUX
2103 	return atomic_sub_return(i, v);
2104 #elif defined(PLATFORM_WINDOWS)
2105 	return InterlockedAdd(v, -i);
2106 #elif defined(PLATFORM_FREEBSD)
2107 	atomic_subtract_int(v, i);
2108 	return atomic_load_acq_32(v);
2109 #endif
2110 }
2111 
2112 inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
2113 {
2114 #ifdef PLATFORM_LINUX
2115 	return atomic_inc_return(v);
2116 #elif defined(PLATFORM_WINDOWS)
2117 	return InterlockedIncrement(v);
2118 #elif defined(PLATFORM_FREEBSD)
2119 	atomic_add_int(v, 1);
2120 	return atomic_load_acq_32(v);
2121 #endif
2122 }
2123 
ATOMIC_DEC_RETURN(ATOMIC_T * v)2124 inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
2125 {
2126 #ifdef PLATFORM_LINUX
2127 	return atomic_dec_return(v);
2128 #elif defined(PLATFORM_WINDOWS)
2129 	return InterlockedDecrement(v);
2130 #elif defined(PLATFORM_FREEBSD)
2131 	atomic_subtract_int(v, 1);
2132 	return atomic_load_acq_32(v);
2133 #endif
2134 }
2135 
ATOMIC_INC_UNLESS(ATOMIC_T * v,int u)2136 inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
2137 {
2138 #ifdef PLATFORM_LINUX
2139 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
2140 	return atomic_add_unless(v, 1, u);
2141 #else
2142 	/* best-effort fallback: only guarantees the value does not exceed u after this call */
2143 	if (ATOMIC_INC_RETURN(v) > u) {
2144 		ATOMIC_DEC(v);
2145 		return 0;
2146 	}
2147 	return 1;
2148 #endif
2149 #else
2150 	#error "TBD\n"
2151 #endif
2152 }
2153 
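/*
 * A small reference-count sketch built on the ATOMIC_* wrappers above
 * (illustrative only, kept under "#if 0" so it is never compiled). The
 * example_* names are hypothetical; ATOMIC_T comes from the OS-abstraction
 * headers this file already includes.
 */
#if 0
static ATOMIC_T example_refcnt;

static void example_ref_init(void)
{
	ATOMIC_SET(&example_refcnt, 1);
}

static int example_ref_get(int limit)
{
	/* refuse to take another reference once the limit is reached */
	return ATOMIC_INC_UNLESS(&example_refcnt, limit) ? _SUCCESS : _FAIL;
}

static void example_ref_put(void)
{
	if (ATOMIC_DEC_RETURN(&example_refcnt) == 0) {
		/* last reference dropped: release resources here */
	}
}
#endif
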
2154 #ifdef PLATFORM_LINUX
2155 /*
2156 * Open a file with the given @param path, @param flag and @param mode
2157 * @param fpp pointer to a struct file pointer; receives the opened struct file on success
2158 * @param path the path of the file to open
2159 * @param flag file open flags, see the Linux filp_open()/open(2) documentation
2160 * @param mode file creation mode, see the Linux documentation
2161 * @return Linux specific error code
2162 */
openFile(struct file ** fpp,const char * path,int flag,int mode)2163 static int openFile(struct file **fpp, const char *path, int flag, int mode)
2164 {
2165 	struct file *fp;
2166 
2167 	fp = filp_open(path, flag, mode);
2168 	if (IS_ERR(fp)) {
2169 		*fpp = NULL;
2170 		return PTR_ERR(fp);
2171 	} else {
2172 		*fpp = fp;
2173 		return 0;
2174 	}
2175 }
2176 
2177 /*
2178 * Close the file with the specific @param fp
2179 * @param fp the pointer of struct file to close
2180 * @return always 0
2181 */
closeFile(struct file * fp)2182 static int closeFile(struct file *fp)
2183 {
2184 	filp_close(fp, NULL);
2185 	return 0;
2186 }
2187 
readFile(struct file * fp,char * buf,int len)2188 static int readFile(struct file *fp, char *buf, int len)
2189 {
2190 	int rlen = 0, sum = 0;
2191 
2192 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2193 	if (!(fp->f_mode & FMODE_CAN_READ))
2194 #else
2195 	if (!fp->f_op || !fp->f_op->read)
2196 #endif
2197 		return -EPERM;
2198 
2199 	while (sum < len) {
2200 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
2201 		rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
2202 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2203 		rlen = __vfs_read(fp, buf + sum, len - sum, &fp->f_pos);
2204 #else
2205 		rlen = fp->f_op->read(fp, buf + sum, len - sum, &fp->f_pos);
2206 #endif
2207 		if (rlen > 0)
2208 			sum += rlen;
2209 		else if (0 != rlen)
2210 			return rlen;
2211 		else
2212 			break;
2213 	}
2214 
2215 	return  sum;
2216 
2217 }
2218 
writeFile(struct file * fp,char * buf,int len)2219 static int writeFile(struct file *fp, char *buf, int len)
2220 {
2221 	int wlen = 0, sum = 0;
2222 
2223 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2224 	if (!(fp->f_mode & FMODE_CAN_WRITE))
2225 #else
2226 	if (!fp->f_op || !fp->f_op->write)
2227 #endif
2228 		return -EPERM;
2229 
2230 	while (sum < len) {
2231 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
2232 		wlen = kernel_write(fp, buf + sum, len - sum, &fp->f_pos);
2233 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2234 		wlen = __vfs_write(fp, buf + sum, len - sum, &fp->f_pos);
2235 #else
2236 		wlen = fp->f_op->write(fp, buf + sum, len - sum, &fp->f_pos);
2237 #endif
2238 		if (wlen > 0)
2239 			sum += wlen;
2240 		else if (0 != wlen)
2241 			return wlen;
2242 		else
2243 			break;
2244 	}
2245 
2246 	return sum;
2247 
2248 }
2249 
2250 /*
2251 * Test if the specified @param pathname is a directory and is readable
2252 * @param sz is not used
2253 * @param pathname the name of the path to test
2254 * @return Linux specific error code
2255 */
isDirReadable(const char * pathname,u32 * sz)2256 static int isDirReadable(const char *pathname, u32 *sz)
2257 {
2258 	struct path path;
2259 
2260 	/* note: the reference kern_path() takes on success is not released here */
2261 	return kern_path(pathname, LOOKUP_FOLLOW, &path);
2262 }
2263 
2264 /*
2265 * Test if the specified @param path is a file and is readable
2266 * If readable and @param sz is not NULL, the file size is returned through @param sz
2267 * @param path the path of the file to test
2268 * @return Linux specific error code
2269 */
isFileReadable(const char * path,u32 * sz)2270 static int isFileReadable(const char *path, u32 *sz)
2271 {
2272 	struct file *fp;
2273 	int ret = 0;
2274 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2275 	mm_segment_t oldfs;
2276 	#endif
2277 	char buf;
2278 
2279 	fp = filp_open(path, O_RDONLY, 0);
2280 	if (IS_ERR(fp))
2281 		ret = PTR_ERR(fp);
2282 	else {
2283 		#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2284 		oldfs = get_fs();
2285 		#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2286 		set_fs(KERNEL_DS);
2287 		#else
2288 		set_fs(get_ds());
2289 		#endif
2290 		#endif
2291 
2292 		if (1 != readFile(fp, &buf, 1))
2293 			ret = -EINVAL; /* fp is a valid pointer here, so PTR_ERR(fp) would not yield a real errno */
2294 
2295 		if (ret == 0 && sz) {
2296 			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
2297 			*sz = i_size_read(fp->f_path.dentry->d_inode);
2298 			#else
2299 			*sz = i_size_read(fp->f_dentry->d_inode);
2300 			#endif
2301 		}
2302 
2303 		#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2304 		set_fs(oldfs);
2305 		#endif
2306 		filp_close(fp, NULL);
2307 	}
2308 	return ret;
2309 }
2310 
2311 /*
2312 * Open the file at @param path and retrieve its content into the memory starting at @param buf, reading at most @param sz bytes
2313 * @param path the path of the file to open and read
2314 * @param buf the starting address of the buffer to store file content
2315 * @param sz how many bytes to read at most
2316 * @return the number of bytes read, or a Linux specific error code
2317 */
retriveFromFile(const char * path,u8 * buf,u32 sz)2318 static int retriveFromFile(const char *path, u8 *buf, u32 sz)
2319 {
2320 	int ret = -1;
2321 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2322 	mm_segment_t oldfs;
2323 	#endif
2324 	struct file *fp;
2325 
2326 	if (path && buf) {
2327 		ret = openFile(&fp, path, O_RDONLY, 0);
2328 		if (0 == ret) {
2329 			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path, fp);
2330 
2331 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2332 			oldfs = get_fs();
2333 			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2334 			set_fs(KERNEL_DS);
2335 			#else
2336 			set_fs(get_ds());
2337 			#endif
2338 			#endif
2339 
2340 			ret = readFile(fp, buf, sz);
2341 
2342 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2343 			set_fs(oldfs);
2344 			#endif
2345 			closeFile(fp);
2346 
2347 			RTW_INFO("%s readFile, ret:%d\n", __FUNCTION__, ret);
2348 
2349 		} else
2350 			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
2351 	} else {
2352 		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
2353 		ret =  -EINVAL;
2354 	}
2355 	return ret;
2356 }
2357 
2358 /*
2359 * Open the file at @param path and write @param sz bytes of data starting at @param buf into the file
2360 * @param path the path of the file to open and write
2361 * @param buf the starting address of the data to write into the file
2362 * @param sz how many bytes to write at most
2363 * @return the number of bytes written, or a Linux specific error code
2364 */
storeToFile(const char * path,u8 * buf,u32 sz)2365 static int storeToFile(const char *path, u8 *buf, u32 sz)
2366 {
2367 	int ret = 0;
2368 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2369 	mm_segment_t oldfs;
2370 	#endif
2371 	struct file *fp;
2372 
2373 	if (path && buf) {
2374 		ret = openFile(&fp, path, O_CREAT | O_WRONLY, 0666);
2375 		if (0 == ret) {
2376 			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path, fp);
2377 
2378 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2379 			oldfs = get_fs();
2380 			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2381 			set_fs(KERNEL_DS);
2382 			#else
2383 			set_fs(get_ds());
2384 			#endif
2385 			#endif
2386 
2387 			ret = writeFile(fp, buf, sz);
2388 
2389 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2390 			set_fs(oldfs);
2391 			#endif
2392 			closeFile(fp);
2393 
2394 			RTW_INFO("%s writeFile, ret:%d\n", __FUNCTION__, ret);
2395 
2396 		} else
2397 			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
2398 	} else {
2399 		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
2400 		ret =  -EINVAL;
2401 	}
2402 	return ret;
2403 }
2404 #endif /* PLATFORM_LINUX */
2405 
2406 /*
2407 * Test if the specified @param path is a directory and is readable
2408 * @param path the path of the directory to test
2409 * @return _TRUE or _FALSE
2410 */
rtw_is_dir_readable(const char * path)2411 int rtw_is_dir_readable(const char *path)
2412 {
2413 #ifdef PLATFORM_LINUX
2414 	if (isDirReadable(path, NULL) == 0)
2415 		return _TRUE;
2416 	else
2417 		return _FALSE;
2418 #else
2419 	/* Todo... */
2420 	return _FALSE;
2421 #endif
2422 }
2423 
2424 /*
2425 * Test if the specified @param path is a file and is readable
2426 * @param path the path of the file to test
2427 * @return _TRUE or _FALSE
2428 */
rtw_is_file_readable(const char * path)2429 int rtw_is_file_readable(const char *path)
2430 {
2431 #ifdef PLATFORM_LINUX
2432 	if (isFileReadable(path, NULL) == 0)
2433 		return _TRUE;
2434 	else
2435 		return _FALSE;
2436 #else
2437 	/* Todo... */
2438 	return _FALSE;
2439 #endif
2440 }
2441 
2442 /*
2443 * Test if the specified @param path is a file and is readable.
2444 * If readable, the file size is returned through @param sz
2445 * @param path the path of the file to test
2446 * @return _TRUE or _FALSE
2447 */
rtw_is_file_readable_with_size(const char * path,u32 * sz)2448 int rtw_is_file_readable_with_size(const char *path, u32 *sz)
2449 {
2450 #ifdef PLATFORM_LINUX
2451 	if (isFileReadable(path, sz) == 0)
2452 		return _TRUE;
2453 	else
2454 		return _FALSE;
2455 #else
2456 	/* Todo... */
2457 	return _FALSE;
2458 #endif
2459 }
2460 
2461 /*
2462 * Test if the specified @param path is a readable file whose size does not exceed @param sz.
2463 * @param sz the maximum acceptable file size in bytes
2464 * @param path the path of the file to test
2465 * @return _TRUE or _FALSE
2466 */
rtw_readable_file_sz_chk(const char * path,u32 sz)2467 int rtw_readable_file_sz_chk(const char *path, u32 sz)
2468 {
2469 	u32 fsz;
2470 
2471 	if (rtw_is_file_readable_with_size(path, &fsz) == _FALSE)
2472 		return _FALSE;
2473 
2474 	if (fsz > sz)
2475 		return _FALSE;
2476 
2477 	return _TRUE;
2478 }
2479 
2480 /*
2481 * Open the file at @param path and retrieve its content into the memory starting at @param buf, reading at most @param sz bytes
2482 * @param path the path of the file to open and read
2483 * @param buf the starting address of the buffer to store file content
2484 * @param sz how many bytes to read at most
2485 * @return the number of bytes read (0 on error)
2486 */
rtw_retrieve_from_file(const char * path,u8 * buf,u32 sz)2487 int rtw_retrieve_from_file(const char *path, u8 *buf, u32 sz)
2488 {
2489 #ifdef PLATFORM_LINUX
2490 	int ret = retriveFromFile(path, buf, sz);
2491 	return ret >= 0 ? ret : 0;
2492 #else
2493 	/* Todo... */
2494 	return 0;
2495 #endif
2496 }
2497 
2498 /*
2499 * Open the file at @param path and write @param sz bytes of data starting at @param buf into the file
2500 * @param path the path of the file to open and write
2501 * @param buf the starting address of the data to write into the file
2502 * @param sz how many bytes to write at most
2503 * @return the number of bytes written (0 on error)
2504 */
rtw_store_to_file(const char * path,u8 * buf,u32 sz)2505 int rtw_store_to_file(const char *path, u8 *buf, u32 sz)
2506 {
2507 #ifdef PLATFORM_LINUX
2508 	int ret = storeToFile(path, buf, sz);
2509 	return ret >= 0 ? ret : 0;
2510 #else
2511 	/* Todo... */
2512 	return 0;
2513 #endif
2514 }
2515 
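/*
 * A sketch of loading a small configuration blob with the helpers above
 * (illustrative only, kept under "#if 0" so it is never compiled). The path
 * and the example_* name are hypothetical; on non-Linux platforms the
 * helpers currently return 0/_FALSE, so the sketch degrades to "no file".
 */
#if 0
static int example_load_cfg_file(u8 *buf, u32 buf_sz)
{
	const char *path = "/lib/firmware/example_rtw_cfg.bin"; /* hypothetical path */
	u32 fsz = 0;

	if (rtw_is_file_readable_with_size(path, &fsz) != _TRUE)
		return 0; /* nothing to load */

	if (fsz > buf_sz)
		return 0; /* refuse to truncate the file */

	/* returns the number of bytes actually read (0 on error) */
	return rtw_retrieve_from_file(path, buf, buf_sz);
}
#endif
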
2516 #ifdef PLATFORM_LINUX
rtw_alloc_etherdev_with_old_priv(int sizeof_priv,void * old_priv)2517 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
2518 {
2519 	struct net_device *pnetdev;
2520 	struct rtw_netdev_priv_indicator *pnpi;
2521 
2522 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
2523 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
2524 #else
2525 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
2526 #endif
2527 	if (!pnetdev)
2528 		goto RETURN;
2529 
2530 	pnpi = netdev_priv(pnetdev);
2531 	pnpi->priv = old_priv;
2532 	pnpi->sizeof_priv = sizeof_priv;
2533 
2534 RETURN:
2535 	return pnetdev;
2536 }
2537 
rtw_alloc_etherdev(int sizeof_priv)2538 struct net_device *rtw_alloc_etherdev(int sizeof_priv)
2539 {
2540 	struct net_device *pnetdev;
2541 	struct rtw_netdev_priv_indicator *pnpi;
2542 
2543 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
2544 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
2545 #else
2546 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
2547 #endif
2548 	if (!pnetdev)
2549 		goto RETURN;
2550 
2551 	pnpi = netdev_priv(pnetdev);
2552 
2553 	pnpi->priv = rtw_zvmalloc(sizeof_priv);
2554 	if (!pnpi->priv) {
2555 		free_netdev(pnetdev);
2556 		pnetdev = NULL;
2557 		goto RETURN;
2558 	}
2559 
2560 	pnpi->sizeof_priv = sizeof_priv;
2561 RETURN:
2562 	return pnetdev;
2563 }
2564 
rtw_free_netdev(struct net_device * netdev)2565 void rtw_free_netdev(struct net_device *netdev)
2566 {
2567 	struct rtw_netdev_priv_indicator *pnpi;
2568 
2569 	if (!netdev)
2570 		goto RETURN;
2571 
2572 	pnpi = netdev_priv(netdev);
2573 
2574 	if (!pnpi->priv)
2575 		goto RETURN;
2576 
2577 	free_netdev(netdev);
2578 
2579 RETURN:
2580 	return;
2581 }
2582 
rtw_change_ifname(_adapter * padapter,const char * ifname)2583 int rtw_change_ifname(_adapter *padapter, const char *ifname)
2584 {
2585 	struct dvobj_priv *dvobj;
2586 	struct net_device *pnetdev;
2587 	struct net_device *cur_pnetdev;
2588 	struct rereg_nd_name_data *rereg_priv;
2589 	int ret;
2590 	u8 rtnl_lock_needed;
2591 
2592 	if (!padapter)
2593 		goto error;
2594 
2595 	dvobj = adapter_to_dvobj(padapter);
2596 	cur_pnetdev = padapter->pnetdev;
2597 	rereg_priv = &padapter->rereg_nd_name_priv;
2598 
2599 	/* free the old_pnetdev */
2600 	if (rereg_priv->old_pnetdev) {
2601 		free_netdev(rereg_priv->old_pnetdev);
2602 		rereg_priv->old_pnetdev = NULL;
2603 	}
2604 
2605 	rtnl_lock_needed = rtw_rtnl_lock_needed(dvobj);
2606 
2607 	if (rtnl_lock_needed)
2608 		unregister_netdev(cur_pnetdev);
2609 	else
2610 		unregister_netdevice(cur_pnetdev);
2611 
2612 	rereg_priv->old_pnetdev = cur_pnetdev;
2613 
2614 	pnetdev = rtw_init_netdev(padapter);
2615 	if (!pnetdev)  {
2616 		ret = -1;
2617 		goto error;
2618 	}
2619 
2620 	SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
2621 
2622 	rtw_init_netdev_name(pnetdev, ifname);
2623 
2624 	_rtw_memcpy(pnetdev->dev_addr, adapter_mac_addr(padapter), ETH_ALEN);
2625 
2626 	if (rtnl_lock_needed)
2627 		ret = register_netdev(pnetdev);
2628 	else
2629 		ret = register_netdevice(pnetdev);
2630 
2631 	if (ret != 0) {
2632 		goto error;
2633 	}
2634 
2635 	return 0;
2636 
2637 error:
2638 
2639 	return -1;
2640 
2641 }
2642 #endif
2643 
2644 #ifdef PLATFORM_FREEBSD
2645 /*
2646  * Copy a buffer from userspace and write into kernel address
2647  * space.
2648  *
2649  * This emulation just calls the FreeBSD copyin function (to
2650  * copy data from user space buffer into a kernel space buffer)
2651  * and is designed to be used with the above io_write_wrapper.
2652  *
2653  * This function should return the number of bytes not copied.
2654  * I.e. success results in a zero value.
2655  * Negative error values are not returned.
2656  */
2657 unsigned long
copy_from_user(void * to,const void * from,unsigned long n)2658 copy_from_user(void *to, const void *from, unsigned long n)
2659 {
2660 	if (copyin(from, to, n) != 0) {
2661 		/* Any errors will be treated as a failure
2662 		   to copy any of the requested bytes */
2663 		return n;
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 unsigned long
copy_to_user(void * to,const void * from,unsigned long n)2670 copy_to_user(void *to, const void *from, unsigned long n)
2671 {
2672 	if (copyout(from, to, n) != 0) {
2673 		/* Any errors will be treated as a failure
2674 		   to copy any of the requested bytes */
2675 		return n;
2676 	}
2677 
2678 	return 0;
2679 }
2680 
2681 
2682 /*
2683  * The usb_register and usb_deregister functions are used to register
2684  * usb drivers with the usb subsystem. In this compatibility layer
2685  * emulation a list of drivers (struct usb_driver) is maintained
2686  * and is used for probing/attaching etc.
2687  *
2688  * usb_register and usb_deregister simply call these functions.
2689  */
2690 int
usb_register(struct usb_driver * driver)2691 usb_register(struct usb_driver *driver)
2692 {
2693 	rtw_usb_linux_register(driver);
2694 	return 0;
2695 }
2696 
2697 
2698 int
usb_deregister(struct usb_driver * driver)2699 usb_deregister(struct usb_driver *driver)
2700 {
2701 	rtw_usb_linux_deregister(driver);
2702 	return 0;
2703 }
2704 
module_init_exit_wrapper(void * arg)2705 void module_init_exit_wrapper(void *arg)
2706 {
2707 	int (*func)(void) = arg;
2708 	func();
2709 	return;
2710 }
2711 
2712 #endif /* PLATFORM_FREEBSD */
2713 
2714 #ifdef CONFIG_PLATFORM_SPRD
2715 	#ifdef do_div
2716 		#undef do_div
2717 	#endif
2718 	#include <asm-generic/div64.h>
2719 #endif
2720 
rtw_modular64(u64 x,u64 y)2721 u64 rtw_modular64(u64 x, u64 y)
2722 {
2723 #ifdef PLATFORM_LINUX
2724 	return do_div(x, y);
2725 #elif defined(PLATFORM_WINDOWS)
2726 	return x % y;
2727 #elif defined(PLATFORM_FREEBSD)
2728 	return x % y;
2729 #endif
2730 }
2731 
rtw_division64(u64 x,u64 y)2732 u64 rtw_division64(u64 x, u64 y)
2733 {
2734 #ifdef PLATFORM_LINUX
2735 	do_div(x, y);
2736 	return x;
2737 #elif defined(PLATFORM_WINDOWS)
2738 	return x / y;
2739 #elif defined(PLATFORM_FREEBSD)
2740 	return x / y;
2741 #endif
2742 }
2743 
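/*
 * rtw_division64() yields the quotient and rtw_modular64() the remainder,
 * matching the do_div() split on Linux. A throughput sketch (illustrative
 * only, kept under "#if 0" so it is never compiled; the example_* name is
 * hypothetical):
 */
#if 0
static u32 example_avg_kbps(u64 tx_bytes, u64 elapsed_ms)
{
	if (elapsed_ms == 0)
		return 0;

	/* bits per millisecond == kbit/s, done as a 64-bit divide */
	return (u32)rtw_division64(tx_bytes * 8, elapsed_ms);
}
#endif
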
rtw_random32(void)2744 inline u32 rtw_random32(void)
2745 {
2746 #ifdef PLATFORM_LINUX
2747 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
2748 	return prandom_u32();
2749 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18))
2750 	u32 random_int;
2751 	get_random_bytes(&random_int , 4);
2752 	return random_int;
2753 #else
2754 	return random32();
2755 #endif
2756 #elif defined(PLATFORM_WINDOWS)
2757 #error "to be implemented\n"
2758 #elif defined(PLATFORM_FREEBSD)
2759 #error "to be implemented\n"
2760 #endif
2761 }
2762 
rtw_buf_free(u8 ** buf,u32 * buf_len)2763 void rtw_buf_free(u8 **buf, u32 *buf_len)
2764 {
2767 	if (!buf || !buf_len)
2768 		return;
2769 
2772 	if (*buf) {
2773 		u32 tmp_buf_len = *buf_len;
2774 		*buf_len = 0;
2775 		rtw_mfree(*buf, tmp_buf_len);
2776 		*buf = NULL;
2777 	}
2778 }
2779 
rtw_buf_update(u8 ** buf,u32 * buf_len,u8 * src,u32 src_len)2780 void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
2781 {
2782 	u32 ori_len = 0, dup_len = 0;
2783 	u8 *ori = NULL;
2784 	u8 *dup = NULL;
2785 
2786 	if (!buf || !buf_len)
2787 		return;
2788 
2789 	if (!src || !src_len)
2790 		goto keep_ori;
2791 
2792 	/* duplicate src */
2793 	dup = rtw_malloc(src_len);
2794 	if (dup) {
2795 		dup_len = src_len;
2796 		_rtw_memcpy(dup, src, dup_len);
2797 	}
2798 
2799 keep_ori:
2800 	ori = *buf;
2801 	ori_len = *buf_len;
2802 
2803 	/* replace buf with dup */
2804 	*buf_len = 0;
2805 	*buf = dup;
2806 	*buf_len = dup_len;
2807 
2808 	/* free ori */
2809 	if (ori && ori_len > 0)
2810 		rtw_mfree(ori, ori_len);
2811 }
2812 
2813 
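/*
 * A sketch of keeping a cached copy of a variable-length blob (e.g. a set of
 * IEs) with rtw_buf_update()/rtw_buf_free() above (illustrative only, kept
 * under "#if 0" so it is never compiled; the example_* names are hypothetical).
 */
#if 0
static u8 *example_cache;
static u32 example_cache_len;

static void example_cache_set(u8 *data, u32 data_len)
{
	/* duplicates data and frees any previously cached copy */
	rtw_buf_update(&example_cache, &example_cache_len, data, data_len);
}

static void example_cache_clear(void)
{
	/* frees the cached copy and resets the length to 0 */
	rtw_buf_free(&example_cache, &example_cache_len);
}
#endif
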
2814 /**
2815  * rtw_cbuf_full - test if cbuf is full
2816  * @cbuf: pointer of struct rtw_cbuf
2817  *
2818  * Returns: _TRUE if cbuf is full
2819  */
rtw_cbuf_full(struct rtw_cbuf * cbuf)2820 inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
2821 {
2822 	return (cbuf->write == cbuf->read - 1) ? _TRUE : _FALSE;
2823 }
2824 
2825 /**
2826  * rtw_cbuf_empty - test if cbuf is empty
2827  * @cbuf: pointer of struct rtw_cbuf
2828  *
2829  * Returns: _TRUE if cbuf is empty
2830  */
rtw_cbuf_empty(struct rtw_cbuf * cbuf)2831 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
2832 {
2833 	return (cbuf->write == cbuf->read) ? _TRUE : _FALSE;
2834 }
2835 
2836 /**
2837  * rtw_cbuf_push - push a pointer into cbuf
2838  * @cbuf: pointer of struct rtw_cbuf
2839  * @buf: pointer to push in
2840  *
2841  * Lock free operation, be careful of the use scheme
2842  * Lock-free operation; safe only for a single producer and a single consumer
2843  * Returns: _SUCCESS if the push succeeded, _FAIL if the cbuf is full
rtw_cbuf_push(struct rtw_cbuf * cbuf,void * buf)2844 bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
2845 {
2846 	if (rtw_cbuf_full(cbuf))
2847 		return _FAIL;
2848 
2849 	if (0)
2850 		RTW_INFO("%s on %u\n", __func__, cbuf->write);
2851 	cbuf->bufs[cbuf->write] = buf;
2852 	cbuf->write = (cbuf->write + 1) % cbuf->size;
2853 
2854 	return _SUCCESS;
2855 }
2856 
2857 /**
2858  * rtw_cbuf_pop - pop a pointer from cbuf
2859  * @cbuf: pointer of struct rtw_cbuf
2860  *
2861  * Lock-free operation; safe only for a single producer and a single consumer
2862  * Returns: the pointer popped out, or NULL if the cbuf is empty
2863  */
rtw_cbuf_pop(struct rtw_cbuf * cbuf)2864 void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
2865 {
2866 	void *buf;
2867 	if (rtw_cbuf_empty(cbuf))
2868 		return NULL;
2869 
2870 	if (0)
2871 		RTW_INFO("%s on %u\n", __func__, cbuf->read);
2872 	buf = cbuf->bufs[cbuf->read];
2873 	cbuf->read = (cbuf->read + 1) % cbuf->size;
2874 
2875 	return buf;
2876 }
2877 
2878 /**
2879  * rtw_cbuf_alloc - allocate a rtw_cbuf with the given size and initialize it
2880  * @size: number of pointer slots
2881  *
2882  * Returns: pointer of struct rtw_cbuf, NULL on allocation failure
2883  */
rtw_cbuf_alloc(u32 size)2884 struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
2885 {
2886 	struct rtw_cbuf *cbuf;
2887 
2888 	cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
2889 
2890 	if (cbuf) {
2891 		cbuf->write = cbuf->read = 0;
2892 		cbuf->size = size;
2893 	}
2894 
2895 	return cbuf;
2896 }
2897 
2898 /**
2899  * rtw_cbuf_free - free the given rtw_cbuf
2900  * @cbuf: pointer of struct rtw_cbuf to free
2901  */
rtw_cbuf_free(struct rtw_cbuf * cbuf)2902 void rtw_cbuf_free(struct rtw_cbuf *cbuf)
2903 {
2904 	rtw_mfree((u8 *)cbuf, sizeof(*cbuf) + sizeof(void *) * cbuf->size);
2905 }
2906 
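/*
 * A single-producer/single-consumer sketch of the rtw_cbuf helpers above
 * (illustrative only, kept under "#if 0" so it is never compiled; the
 * example_* names are hypothetical). The helpers are lock free, so only one
 * producer and one consumer context should touch the same cbuf.
 */
#if 0
static void example_cbuf_usage(void)
{
	static int dummy_item;
	struct rtw_cbuf *cbuf;
	void *item;

	cbuf = rtw_cbuf_alloc(8); /* room for 8 pointer slots */
	if (!cbuf)
		return;

	if (rtw_cbuf_push(cbuf, &dummy_item) != _SUCCESS) {
		/* cbuf is full: drop or retry */
	}

	while ((item = rtw_cbuf_pop(cbuf)) != NULL) {
		/* consume item here */
	}

	rtw_cbuf_free(cbuf);
}
#endif
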
2907 /**
2908  * map_readN - read a range of map data
2909  * @map: map to read
2910  * @offset: start address to read
2911  * @len: length to read
2912  * @buf: pointer of buffer to store data read
2913  *
2914  * Returns: _SUCCESS or _FAIL
2915  */
map_readN(const struct map_t * map,u16 offset,u16 len,u8 * buf)2916 int map_readN(const struct map_t *map, u16 offset, u16 len, u8 *buf)
2917 {
2918 	const struct map_seg_t *seg;
2919 	int ret = _FAIL;
2920 	int i;
2921 
2922 	if (len == 0) {
2923 		rtw_warn_on(1);
2924 		goto exit;
2925 	}
2926 
2927 	if (offset + len > map->len) {
2928 		rtw_warn_on(1);
2929 		goto exit;
2930 	}
2931 
2932 	_rtw_memset(buf, map->init_value, len);
2933 
2934 	for (i = 0; i < map->seg_num; i++) {
2935 		u8 *c_dst, *c_src;
2936 		u16 c_len;
2937 
2938 		seg = map->segs + i;
2939 		if (seg->sa + seg->len <= offset || seg->sa >= offset + len)
2940 			continue;
2941 
2942 		if (seg->sa >= offset) {
2943 			c_dst = buf + (seg->sa - offset);
2944 			c_src = seg->c;
2945 			if (seg->sa + seg->len <= offset + len)
2946 				c_len = seg->len;
2947 			else
2948 				c_len = offset + len - seg->sa;
2949 		} else {
2950 			c_dst = buf;
2951 			c_src = seg->c + (offset - seg->sa);
2952 			if (seg->sa + seg->len >= offset + len)
2953 				c_len = len;
2954 			else
2955 				c_len = seg->sa + seg->len - offset;
2956 		}
2957 
2958 		_rtw_memcpy(c_dst, c_src, c_len);
2959 	}
2960 
2961 	ret = _SUCCESS;
2962 	return ret;
2963 }
2964 
2965 /**
2966  * map_read8 - read 1 byte of map data
2967  * @map: map to read
2968  * @offset: address to read
2969  *
2970  * Returns: value of data of specified offset. map.init_value if offset is out of range
2971  * Returns: the data value at the specified offset, or map.init_value if the offset is not covered by any segment
map_read8(const struct map_t * map,u16 offset)2972 u8 map_read8(const struct map_t *map, u16 offset)
2973 {
2974 	const struct map_seg_t *seg;
2975 	u8 val = map->init_value;
2976 	int i;
2977 
2978 	if (offset + 1 > map->len) {
2979 		rtw_warn_on(1);
2980 		goto exit;
2981 	}
2982 
2983 	for (i = 0; i < map->seg_num; i++) {
2984 		seg = map->segs + i;
2985 		if (seg->sa + seg->len <= offset || seg->sa >= offset + 1)
2986 			continue;
2987 
2988 		val = *(seg->c + offset - seg->sa);
2989 		break;
2990 	}
2991 
2992 exit:
2993 	return val;
2994 }
2995 
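/*
 * A sketch of reading from a sparse map with map_readN()/map_read8() above
 * (illustrative only, kept under "#if 0" so it is never compiled). The map is
 * assumed to be built elsewhere (e.g. a default efuse/EEPROM layout) and the
 * offsets below are hypothetical.
 */
#if 0
static void example_dump_map_head(const struct map_t *map)
{
	u8 tmp[4];

	/* bulk read: gaps between segments read back as map->init_value */
	if (map->len >= 4)
		map_readN(map, 0x00, 4, tmp);

	/* single-byte read with the same out-of-segment behaviour */
	if (map->len >= 1)
		tmp[0] = map_read8(map, 0x00);
}
#endif
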
2996 #ifdef CONFIG_RTW_MESH
rtw_blacklist_add(_queue * blist,const u8 * addr,u32 timeout_ms)2997 int rtw_blacklist_add(_queue *blist, const u8 *addr, u32 timeout_ms)
2998 {
2999 	struct blacklist_ent *ent;
3000 	_list *list, *head;
3001 	u8 exist = _FALSE, timeout = _FALSE;
3002 
3003 	enter_critical_bh(&blist->lock);
3004 
3005 	head = &blist->queue;
3006 	list = get_next(head);
3007 	while (rtw_end_of_queue_search(head, list) == _FALSE) {
3008 		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3009 		list = get_next(list);
3010 
3011 		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
3012 			exist = _TRUE;
3013 			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
3014 				timeout = _TRUE;
3015 			ent->exp_time = rtw_get_current_time()
3016 				+ rtw_ms_to_systime(timeout_ms);
3017 			break;
3018 		}
3019 
3020 		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3021 			rtw_list_delete(&ent->list);
3022 			rtw_mfree(ent, sizeof(struct blacklist_ent));
3023 		}
3024 	}
3025 
3026 	if (exist == _FALSE) {
3027 		ent = rtw_malloc(sizeof(struct blacklist_ent));
3028 		if (ent) {
3029 			_rtw_memcpy(ent->addr, addr, ETH_ALEN);
3030 			ent->exp_time = rtw_get_current_time()
3031 				+ rtw_ms_to_systime(timeout_ms);
3032 			rtw_list_insert_tail(&ent->list, head);
3033 		}
3034 	}
3035 
3036 	exit_critical_bh(&blist->lock);
3037 
3038 	return (exist == _TRUE && timeout == _FALSE) ? RTW_ALREADY : (ent ? _SUCCESS : _FAIL);
3039 }
3040 
rtw_blacklist_del(_queue * blist,const u8 * addr)3041 int rtw_blacklist_del(_queue *blist, const u8 *addr)
3042 {
3043 	struct blacklist_ent *ent = NULL;
3044 	_list *list, *head;
3045 	u8 exist = _FALSE;
3046 
3047 	enter_critical_bh(&blist->lock);
3048 	head = &blist->queue;
3049 	list = get_next(head);
3050 	while (rtw_end_of_queue_search(head, list) == _FALSE) {
3051 		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3052 		list = get_next(list);
3053 
3054 		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
3055 			rtw_list_delete(&ent->list);
3056 			rtw_mfree(ent, sizeof(struct blacklist_ent));
3057 			exist = _TRUE;
3058 			break;
3059 		}
3060 
3061 		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3062 			rtw_list_delete(&ent->list);
3063 			rtw_mfree(ent, sizeof(struct blacklist_ent));
3064 		}
3065 	}
3066 
3067 	exit_critical_bh(&blist->lock);
3068 
3069 	return exist == _TRUE ? _SUCCESS : RTW_ALREADY;
3070 }
3071 
rtw_blacklist_search(_queue * blist,const u8 * addr)3072 int rtw_blacklist_search(_queue *blist, const u8 *addr)
3073 {
3074 	struct blacklist_ent *ent = NULL;
3075 	_list *list, *head;
3076 	u8 exist = _FALSE;
3077 
3078 	enter_critical_bh(&blist->lock);
3079 	head = &blist->queue;
3080 	list = get_next(head);
3081 	while (rtw_end_of_queue_search(head, list) == _FALSE) {
3082 		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3083 		list = get_next(list);
3084 
3085 		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
3086 			if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3087 				rtw_list_delete(&ent->list);
3088 				rtw_mfree(ent, sizeof(struct blacklist_ent));
3089 			} else
3090 				exist = _TRUE;
3091 			break;
3092 		}
3093 
3094 		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3095 			rtw_list_delete(&ent->list);
3096 			rtw_mfree(ent, sizeof(struct blacklist_ent));
3097 		}
3098 	}
3099 
3100 	exit_critical_bh(&blist->lock);
3101 
3102 	return exist;
3103 }
3104 
rtw_blacklist_flush(_queue * blist)3105 void rtw_blacklist_flush(_queue *blist)
3106 {
3107 	struct blacklist_ent *ent;
3108 	_list *list, *head;
3109 	_list tmp;
3110 
3111 	_rtw_init_listhead(&tmp);
3112 
3113 	enter_critical_bh(&blist->lock);
3114 	rtw_list_splice_init(&blist->queue, &tmp);
3115 	exit_critical_bh(&blist->lock);
3116 
3117 	head = &tmp;
3118 	list = get_next(head);
3119 	while (rtw_end_of_queue_search(head, list) == _FALSE) {
3120 		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3121 		list = get_next(list);
3122 		rtw_list_delete(&ent->list);
3123 		rtw_mfree(ent, sizeof(struct blacklist_ent));
3124 	}
3125 }
3126 
dump_blacklist(void * sel,_queue * blist,const char * title)3127 void dump_blacklist(void *sel, _queue *blist, const char *title)
3128 {
3129 	struct blacklist_ent *ent = NULL;
3130 	_list *list, *head;
3131 
3132 	enter_critical_bh(&blist->lock);
3133 	head = &blist->queue;
3134 	list = get_next(head);
3135 
3136 	if (rtw_end_of_queue_search(head, list) == _FALSE) {
3137 		if (title)
3138 			RTW_PRINT_SEL(sel, "%s:\n", title);
3139 
3140 		while (rtw_end_of_queue_search(head, list) == _FALSE) {
3141 			ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3142 			list = get_next(list);
3143 
3144 			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
3145 				RTW_PRINT_SEL(sel, MAC_FMT" expired\n", MAC_ARG(ent->addr));
3146 			else
3147 				RTW_PRINT_SEL(sel, MAC_FMT" %u\n", MAC_ARG(ent->addr)
3148 					, rtw_get_remaining_time_ms(ent->exp_time));
3149 		}
3150 
3151 	}
3152 	exit_critical_bh(&blist->lock);
3153 }
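
/*
 * Typical use of the blacklist helpers above on an already initialised
 * _queue (illustrative only, kept under "#if 0" so it is never compiled;
 * the example_* name, peer address and timeout are hypothetical).
 */
#if 0
static void example_blacklist_usage(_queue *blist, const u8 *peer_addr)
{
	/* ban the peer for 10 seconds; re-adding refreshes the expiry time */
	rtw_blacklist_add(blist, peer_addr, 10 * 1000);

	/* non-expired entries report as blacklisted */
	if (rtw_blacklist_search(blist, peer_addr)) {
		/* drop frames from this peer */
	}

	/* lift the ban early, then clear every remaining entry */
	rtw_blacklist_del(blist, peer_addr);
	rtw_blacklist_flush(blist);
}
#endif /* 0 */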
3154 #endif
3155 
3156 /**
3157 * is_null -
3158 *
3159 * Return	TRUE if c is null character
3160 *		FALSE otherwise.
3161 */
is_null(char c)3162 inline BOOLEAN is_null(char c)
3163 {
3164 	if (c == '\0')
3165 		return _TRUE;
3166 	else
3167 		return _FALSE;
3168 }
3169 
is_all_null(char * c,int len)3170 inline BOOLEAN is_all_null(char *c, int len)
3171 {
3172 	for (; len > 0; len--)
3173 		if (c[len - 1] != '\0')
3174 			return _FALSE;
3175 
3176 	return _TRUE;
3177 }
3178 
3179 /**
3180 * is_eol -
3181 *
3182 * Return	TRUE if c represents an EOL (end of line) character
3183 *		FALSE otherwise.
3184 */
is_eol(char c)3185 inline BOOLEAN is_eol(char c)
3186 {
3187 	if (c == '\r' || c == '\n')
3188 		return _TRUE;
3189 	else
3190 		return _FALSE;
3191 }
3192 
3193 /**
3194 * is_space -
3195 *
3196 * Return	TRUE if c represents a space or tab character
3197 *		FALSE otherwise.
3198 */
is_space(char c)3199 inline BOOLEAN is_space(char c)
3200 {
3201 	if (c == ' ' || c == '\t')
3202 		return _TRUE;
3203 	else
3204 		return _FALSE;
3205 }
3206 
3207 /**
3208 * IsHexDigit -
3209 *
3210 * Return	TRUE if chTmp is a hexadecimal digit
3211 *		FALSE otherwise.
3212 */
IsHexDigit(char chTmp)3213 inline BOOLEAN IsHexDigit(char chTmp)
3214 {
3215 	if ((chTmp >= '0' && chTmp <= '9') ||
3216 		(chTmp >= 'a' && chTmp <= 'f') ||
3217 		(chTmp >= 'A' && chTmp <= 'F'))
3218 		return _TRUE;
3219 	else
3220 		return _FALSE;
3221 }
3222 
3223 /**
3224 * is_alpha -
3225 *
3226 * Return	TRUE if chTmp is an alphabetic character
3227 *		FALSE otherwise.
3228 */
is_alpha(char chTmp)3229 inline BOOLEAN is_alpha(char chTmp)
3230 {
3231 	if ((chTmp >= 'a' && chTmp <= 'z') ||
3232 		(chTmp >= 'A' && chTmp <= 'Z'))
3233 		return _TRUE;
3234 	else
3235 		return _FALSE;
3236 }
3237 
alpha_to_upper(char c)3238 inline char alpha_to_upper(char c)
3239 {
3240 	if ((c >= 'a' && c <= 'z'))
3241 		c = 'A' + (c - 'a');
3242 	return c;
3243 }
3244 
hex2num_i(char c)3245 int hex2num_i(char c)
3246 {
3247 	if (c >= '0' && c <= '9')
3248 		return c - '0';
3249 	if (c >= 'a' && c <= 'f')
3250 		return c - 'a' + 10;
3251 	if (c >= 'A' && c <= 'F')
3252 		return c - 'A' + 10;
3253 	return -1;
3254 }
3255 
hex2byte_i(const char * hex)3256 int hex2byte_i(const char *hex)
3257 {
3258 	int a, b;
3259 	a = hex2num_i(*hex++);
3260 	if (a < 0)
3261 		return -1;
3262 	b = hex2num_i(*hex++);
3263 	if (b < 0)
3264 		return -1;
3265 	return (a << 4) | b;
3266 }
3267 
hexstr2bin(const char * hex,u8 * buf,size_t len)3268 int hexstr2bin(const char *hex, u8 *buf, size_t len)
3269 {
3270 	size_t i;
3271 	int a;
3272 	const char *ipos = hex;
3273 	u8 *opos = buf;
3274 
3275 	for (i = 0; i < len; i++) {
3276 		a = hex2byte_i(ipos);
3277 		if (a < 0)
3278 			return -1;
3279 		*opos++ = a;
3280 		ipos += 2;
3281 	}
3282 	return 0;
3283 }
3284 
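/*
 * A sketch of parsing a MAC address given as 12 contiguous hex digits (no
 * separators) with hexstr2bin() above, which consumes exactly 2 * len
 * characters and returns 0 on success (illustrative only, kept under
 * "#if 0" so it is never compiled; the example_* name is hypothetical).
 */
#if 0
static int example_parse_mac(const char *hex12, u8 mac[ETH_ALEN])
{
	if (!hex12 || strlen(hex12) < 2 * ETH_ALEN)
		return -1;

	return hexstr2bin(hex12, mac, ETH_ALEN);
}
#endif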
3285