1 /******************************************************************************
2 *
3 * Copyright(c) 2007 - 2017 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15
16
17 #define _OSDEP_SERVICE_C_
18
19 #include <drv_types.h>
20
21 #define RT_TAG '1178'
22
23 #ifdef DBG_MEMORY_LEAK
24 #ifdef PLATFORM_LINUX
25 atomic_t _malloc_cnt = ATOMIC_INIT(0);
26 atomic_t _malloc_size = ATOMIC_INIT(0);
27 #endif
28 #endif /* DBG_MEMORY_LEAK */
29
30
31 #if defined(PLATFORM_LINUX)
32 /*
33 * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
34 * @return: one of RTW_STATUS_CODE
35 */
inline int RTW_STATUS_CODE(int error_code)
{
	/* any non-negative OS code counts as generic success */
	if (error_code >= 0)
		return _SUCCESS;

	switch (error_code) {
	/* case -ETIMEDOUT: */
	/* return RTW_STATUS_TIMEDOUT; */
	default:
		return _FAIL;
	}
}
#else
/* Non-Linux builds: the code is already OS independent; pass it through. */
inline int RTW_STATUS_CODE(int error_code)
{
	return error_code;
}
#endif
54
rtw_atoi(u8 * s)55 u32 rtw_atoi(u8 *s)
56 {
57
58 int num = 0, flag = 0;
59 int i;
60 for (i = 0; i <= strlen(s); i++) {
61 if (s[i] >= '0' && s[i] <= '9')
62 num = num * 10 + s[i] - '0';
63 else if (s[0] == '-' && i == 0)
64 flag = 1;
65 else
66 break;
67 }
68
69 if (flag == 1)
70 num = num * -1;
71
72 return num;
73
74 }
75
/* Allocate @sz bytes of virtually-contiguous memory (platform dispatch).
 * Free with _rtw_vmfree(). Returns NULL on failure.
 * NOTE(review): if no PLATFORM_* macro is defined, pbuf is returned
 * uninitialized - presumably one is always defined; confirm build config.
 */
inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf;
#ifdef PLATFORM_LINUX
	pbuf = vmalloc(sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif

#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	/* leak accounting: outstanding allocation count and total bytes */
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}
101
/* Allocate @sz bytes of virtual memory, zero-initialized.
 * Linux: goes through _rtw_vmalloc() (so DBG_MEMORY_LEAK accounting
 * applies) then memset. FreeBSD: M_ZERO. Windows: Ndis alloc + fill.
 */
inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf;
#ifdef PLATFORM_LINUX
	pbuf = _rtw_vmalloc(sz);
	if (pbuf != NULL)
		memset(pbuf, 0, sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
	if (pbuf != NULL)
		NdisFillMemory(pbuf, sz, 0);
#endif

	return pbuf;
}
121
/* Free memory from _rtw_vmalloc()/_rtw_zvmalloc().
 * @sz must match the original allocation size; it is needed by the
 * Windows path and by the leak-accounting counters.
 */
inline void _rtw_vmfree(void *pbuf, u32 sz)
{
#ifdef PLATFORM_LINUX
	vfree(pbuf);
#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS
	NdisFreeMemory(pbuf, sz, 0);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */
}
141
/* Allocate @sz bytes of physically-contiguous memory.
 * Linux: kmalloc, using GFP_ATOMIC when called from interrupt context.
 * RTK_DMP_PLATFORM routes requests above 16 KB (0x4000) to dvr_malloc().
 * Returns NULL on failure; free with _rtw_mfree().
 */
void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		pbuf = dvr_malloc(sz);
	else
#endif
		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);

#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);

#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	/* leak accounting: outstanding allocation count and total bytes */
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;

}
176
177
/* Allocate @sz bytes via _rtw_malloc() and zero them.
 * FreeBSD zeroes in one step with M_ZERO. Returns NULL on failure.
 */
void *_rtw_zmalloc(u32 sz)
{
#ifdef PLATFORM_FREEBSD
	return malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#else /* PLATFORM_FREEBSD */
	void *pbuf = _rtw_malloc(sz);

	if (pbuf != NULL) {

#ifdef PLATFORM_LINUX
		memset(pbuf, 0, sz);
#endif

#ifdef PLATFORM_WINDOWS
		NdisFillMemory(pbuf, sz, 0);
#endif

	}

	return pbuf;
#endif /* PLATFORM_FREEBSD */
}
200
/* Free memory from _rtw_malloc()/_rtw_zmalloc().
 * @sz must match the allocation size: it selects the dvr_free() path on
 * RTK_DMP_PLATFORM and feeds the Windows/leak-accounting paths.
 */
void _rtw_mfree(void *pbuf, u32 sz)
{

#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		dvr_free(pbuf);
	else
#endif
		kfree(pbuf);

#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS

	NdisFreeMemory(pbuf, sz, 0);

#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */

}
230
231 #ifdef PLATFORM_FREEBSD
232 /* review again */
/* Minimal dev_alloc_skb() substitute for FreeBSD: allocates the sk_buff
 * header and a separate @size-byte data area.
 * Returns NULL if either allocation fails (header is released when the
 * data allocation fails).
 */
struct sk_buff *dev_alloc_skb(unsigned int size)
{
	struct sk_buff *skb = NULL;
	u8 *data = NULL;

	/* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */
	skb = _rtw_malloc(sizeof(struct sk_buff));
	if (!skb)
		goto out;
	data = _rtw_malloc(size);
	if (!data)
		goto nodata;

	/* empty buffer: data/tail start at head, end marks the capacity */
	skb->head = (unsigned char *)data;
	skb->data = (unsigned char *)data;
	skb->tail = (unsigned char *)data;
	skb->end = (unsigned char *)data + size;
	skb->len = 0;
	/* printf("%s()-%d: skb=%p, skb->head = %p\n", __FUNCTION__, __LINE__, skb, skb->head); */

out:
	return skb;
nodata:
	_rtw_mfree(skb, sizeof(struct sk_buff));
	skb = NULL;
	goto out;

}
261
dev_kfree_skb_any(struct sk_buff * skb)262 void dev_kfree_skb_any(struct sk_buff *skb)
263 {
264 /* printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head); */
265 if (skb->head)
266 _rtw_mfree(skb->head, 0);
267 /* printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb); */
268 if (skb)
269 _rtw_mfree(skb, 0);
270 }
/* FreeBSD stub: skb cloning is not implemented; always returns NULL.
 * NOTE(review): rtw_skb_clone() callers on this platform always get
 * NULL - confirm they tolerate it.
 */
struct sk_buff *skb_clone(const struct sk_buff *skb)
{
	return NULL;
}
275
276 #endif /* PLATFORM_FREEBSD */
277
/* Allocate an skb with @sz bytes of data room; GFP_ATOMIC when in irq. */
inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
#ifdef PLATFORM_LINUX
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return dev_alloc_skb(sz);
#endif /* PLATFORM_FREEBSD */
}
288
/* Free an skb; safe from any context (hard/soft irq) on Linux. */
inline void _rtw_skb_free(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
293
/* Deep-copy an skb (header and data). FreeBSD: unimplemented, NULL. */
inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}
304
/* Clone an skb (new header, shared data). FreeBSD stub returns NULL. */
inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return skb_clone(skb);
#endif /* PLATFORM_FREEBSD */
}
/* Copy header + linear data, share fragments (pskb_copy).
 * Kernels older than 2.6.36 lack pskb_copy with a gfp argument, so they
 * fall back to skb_clone. FreeBSD: unimplemented, returns NULL.
 */
inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}
329
/* Hand a received packet to the OS network stack through @ndev.
 * Ownership of @skb passes to the stack on the Linux/FreeBSD paths.
 */
inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
	return netif_rx(skb);
#elif defined(PLATFORM_FREEBSD)
	return (*ndev->if_input)(ndev, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
342
343 #ifdef CONFIG_RTW_NAPI
/* NAPI path: deliver @skb synchronously via netif_receive_skb (Linux only). */
inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
	return netif_receive_skb(skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
354
355 #ifdef CONFIG_RTW_GRO
/* GRO path: deliver @skb through napi_gro_receive (Linux only). */
inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	return napi_gro_receive(napi, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
365 #endif /* CONFIG_RTW_GRO */
366 #endif /* CONFIG_RTW_NAPI */
367
_rtw_skb_queue_purge(struct sk_buff_head * list)368 void _rtw_skb_queue_purge(struct sk_buff_head *list)
369 {
370 struct sk_buff *skb;
371
372 while ((skb = skb_dequeue(list)) != NULL)
373 _rtw_skb_free(skb);
374 }
375
376 #ifdef CONFIG_USB_HCI
/* Allocate a DMA-consistent USB transfer buffer; DMA address out in @dma.
 * usb_buffer_alloc() was renamed usb_alloc_coherent() in kernel 2.6.35.
 */
inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#else
	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return malloc(size, M_USBDEV, M_NOWAIT | M_ZERO);
#endif /* PLATFORM_FREEBSD */
}
/* Release a buffer from _rtw_usb_buffer_alloc(); @size/@dma must match. */
inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	usb_free_coherent(dev, size, addr, dma);
#else
	usb_buffer_free(dev, size, addr, dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	free(addr, M_USBDEV);
#endif /* PLATFORM_FREEBSD */
}
405 #endif /* CONFIG_USB_HCI */
406
407 #if defined(DBG_MEM_ALLOC)
408
/* Per-category memory accounting (active only with DBG_MEM_ALLOC). */
struct rtw_mem_stat {
	ATOMIC_T alloc; /* the memory bytes we allocate currently */
	ATOMIC_T peak; /* the peak memory bytes we allocate */
	ATOMIC_T alloc_cnt; /* the alloc count for alloc currently */
	ATOMIC_T alloc_err_cnt; /* the error times we fail to allocate memory */
};

/* one slot per allocation type (VIR/PHY/SKB/USB) */
struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
#ifdef RTW_MEM_FUNC_STAT
/* one slot per functional area (IO/TX/RX/...) */
struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
#endif

/* display labels; order must match the MSTAT_TYPE_* values */
char *MSTAT_TYPE_str[] = {
	"VIR",
	"PHY",
	"SKB",
	"USB",
};

#ifdef RTW_MEM_FUNC_STAT
/* display labels; order must match the MSTAT_FUNC_* values */
char *MSTAT_FUNC_str[] = {
	"UNSP",
	"IO",
	"TXIO",
	"RXIO",
	"TX",
	"RX",
};
#endif
438
/* Dump all memory statistics as a table to @sel.
 * Counters are sampled into local arrays first so the printed table is a
 * near-consistent snapshot. Columns: current bytes, peak bytes,
 * allocation count, allocation-error count.
 */
void rtw_mstat_dump(void *sel)
{
	int i;
	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
#ifdef RTW_MEM_FUNC_STAT
	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
#endif

	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
	}

#ifdef RTW_MEM_FUNC_STAT
	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
	}
#endif

	RTW_PRINT_SEL(sel, "===================== MSTAT =====================\n");
	RTW_PRINT_SEL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "aloc_cnt", "err_cnt");
	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++)
		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
#ifdef RTW_MEM_FUNC_STAT
	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++)
		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
#endif
}
474
/* Record one allocate/free event in the memory statistics.
 * @flags:  encodes the type index (and, with RTW_MEM_FUNC_STAT, the
 *          function index) of the caller
 * @status: MSTAT_ALLOC_SUCCESS / MSTAT_ALLOC_FAIL / MSTAT_FREE
 * @sz:     number of bytes involved in the event
 * Fixes: removed the stray ';' after the switch block and added an
 * explicit default case.
 * NOTE(review): the lazy first-call initialization keyed on update_time
 * is not race-free if two contexts enter concurrently - confirm callers.
 */
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static systime update_time = 0;
	int peak, alloc;
	int i;

	/* initialization (first call only) */
	if (!update_time) {
		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
#ifdef RTW_MEM_FUNC_STAT
		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
#endif
	}

	switch (status) {
	case MSTAT_ALLOC_SUCCESS:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		/* track high-water mark (read-then-set; approximate under races) */
		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
#endif
		break;

	case MSTAT_ALLOC_FAIL:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
#endif
		break;

	case MSTAT_FREE:
		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
#endif
		break;

	default:
		break;
	}

	/* if (rtw_get_passing_time_ms(update_time) > 5000) { */
	/* rtw_mstat_dump(RTW_DBGDUMP); */
	update_time = rtw_get_current_time();
	/* } */
}
538
#ifndef SIZE_MAX
#define SIZE_MAX (~(size_t)0)
#endif

/* A sniff rule selects allocations to trace: matching @flags with a
 * size in the inclusive range [lb, hb]. */
struct mstat_sniff_rule {
	enum mstat_f flags;
	size_t lb;
	size_t hb;
};

/* default: trace physically-contiguous allocations larger than 4 KB */
struct mstat_sniff_rule mstat_sniff_rules[] = {
	{MSTAT_TYPE_PHY, 4097, SIZE_MAX},
};

int mstat_sniff_rule_num = sizeof(mstat_sniff_rules) / sizeof(struct mstat_sniff_rule);
554
match_mstat_sniff_rules(const enum mstat_f flags,const size_t size)555 bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
556 {
557 int i;
558 for (i = 0; i < mstat_sniff_rule_num; i++) {
559 if (mstat_sniff_rules[i].flags == flags
560 && mstat_sniff_rules[i].lb <= size
561 && mstat_sniff_rules[i].hb >= size)
562 return _TRUE;
563 }
564
565 return _FALSE;
566 }
567
/* Tracked _rtw_vmalloc(): logs sniff-rule hits and updates mem stats. */
inline void *dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_vmalloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
585
/* Tracked _rtw_zvmalloc(): logs sniff-rule hits and updates mem stats. */
inline void *dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_zvmalloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
603
/* Tracked _rtw_vmfree(): logs sniff-rule hits and updates mem stats. */
inline void dbg_rtw_vmfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_vmfree(pbuf, sz);
	rtw_mstat_update(flags, MSTAT_FREE, sz);
}
618
/* Tracked _rtw_malloc(): logs sniff-rule hits and updates mem stats. */
inline void *dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_malloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
636
/* Tracked _rtw_zmalloc(): logs sniff-rule hits and updates mem stats. */
inline void *dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_zmalloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
654
/* Tracked _rtw_mfree(): logs sniff-rule hits and updates mem stats. */
inline void dbg_rtw_mfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_mfree(pbuf, sz);
	rtw_mstat_update(flags, MSTAT_FREE, sz);
}
668
dbg_rtw_skb_alloc(unsigned int size,const enum mstat_f flags,const char * func,int line)669 inline struct sk_buff *dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
670 {
671 struct sk_buff *skb;
672 unsigned int truesize = 0;
673
674 skb = _rtw_skb_alloc(size);
675
676 if (skb)
677 truesize = skb->truesize;
678
679 if (!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
680 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);
681
682 rtw_mstat_update(
683 flags
684 , skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
685 , truesize
686 );
687
688 return skb;
689 }
690
dbg_rtw_skb_free(struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)691 inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
692 {
693 unsigned int truesize = skb->truesize;
694
695 if (match_mstat_sniff_rules(flags, truesize))
696 RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
697
698 _rtw_skb_free(skb);
699
700 rtw_mstat_update(
701 flags
702 , MSTAT_FREE
703 , truesize
704 );
705 }
706
dbg_rtw_skb_copy(const struct sk_buff * skb,const enum mstat_f flags,const char * func,const int line)707 inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
708 {
709 struct sk_buff *skb_cp;
710 unsigned int truesize = skb->truesize;
711 unsigned int cp_truesize = 0;
712
713 skb_cp = _rtw_skb_copy(skb);
714 if (skb_cp)
715 cp_truesize = skb_cp->truesize;
716
717 if (!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
718 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);
719
720 rtw_mstat_update(
721 flags
722 , skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
723 , cp_truesize
724 );
725
726 return skb_cp;
727 }
728
dbg_rtw_skb_clone(struct sk_buff * skb,const enum mstat_f flags,const char * func,const int line)729 inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
730 {
731 struct sk_buff *skb_cl;
732 unsigned int truesize = skb->truesize;
733 unsigned int cl_truesize = 0;
734
735 skb_cl = _rtw_skb_clone(skb);
736 if (skb_cl)
737 cl_truesize = skb_cl->truesize;
738
739 if (!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
740 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);
741
742 rtw_mstat_update(
743 flags
744 , skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
745 , cl_truesize
746 );
747
748 return skb_cl;
749 }
750
/* Tracked _rtw_netif_rx(): the skb leaves driver ownership, so its
 * truesize is recorded as a free. */
inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	const unsigned int truesize = skb->truesize;
	int rc;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	rc = _rtw_netif_rx(ndev, skb);
	rtw_mstat_update(flags, MSTAT_FREE, truesize);

	return rc;
}
769
770 #ifdef CONFIG_RTW_NAPI
/* Tracked _rtw_netif_receive_skb(): skb leaves driver ownership, so its
 * truesize is recorded as a free. */
inline int dbg_rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	const unsigned int truesize = skb->truesize;
	int rc;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	rc = _rtw_netif_receive_skb(ndev, skb);
	rtw_mstat_update(flags, MSTAT_FREE, truesize);

	return rc;
}
789
790 #ifdef CONFIG_RTW_GRO
dbg_rtw_napi_gro_receive(struct napi_struct * napi,struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)791 inline gro_result_t dbg_rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
792 {
793 int ret;
794 unsigned int truesize = skb->truesize;
795
796 if (match_mstat_sniff_rules(flags, truesize))
797 RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
798
799 ret = _rtw_napi_gro_receive(napi, skb);
800
801 rtw_mstat_update(
802 flags
803 , MSTAT_FREE
804 , truesize
805 );
806
807 return ret;
808 }
809 #endif /* CONFIG_RTW_GRO */
810 #endif /* CONFIG_RTW_NAPI */
811
dbg_rtw_skb_queue_purge(struct sk_buff_head * list,enum mstat_f flags,const char * func,int line)812 inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
813 {
814 struct sk_buff *skb;
815
816 while ((skb = skb_dequeue(list)) != NULL)
817 dbg_rtw_skb_free(skb, flags, func, line);
818 }
819
820 #ifdef CONFIG_USB_HCI
/* Tracked _rtw_usb_buffer_alloc(): logs sniff hits and updates stats. */
inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	ptr = _rtw_usb_buffer_alloc(dev, size, dma);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, size);

	return ptr;
}
838
/* Tracked _rtw_usb_buffer_free(): logs sniff hits and updates stats. */
inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
{
	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	_rtw_usb_buffer_free(dev, size, addr, dma);
	rtw_mstat_update(flags, MSTAT_FREE, size);
}
853 #endif /* CONFIG_USB_HCI */
854
855 #endif /* defined(DBG_MEM_ALLOC) */
856
/* Allocate an h x w 2-D array of @size-byte elements as one block:
 * h row pointers followed by the contiguous payload. Free the returned
 * pointer with rtw_mfree2d(). Returns NULL on allocation failure.
 */
void *rtw_malloc2d(int h, int w, size_t size)
{
	int row;
	void **tbl = (void **)rtw_zmalloc(h * sizeof(void *) + h * w * size);

	if (tbl == NULL) {
		RTW_INFO("%s: alloc memory fail!\n", __FUNCTION__);
		return NULL;
	}

	/* point each row at its slice of the payload area after the table */
	for (row = 0; row < h; row++)
		tbl[row] = ((char *)(tbl + h)) + row * w * size;

	return tbl;
}
872
/* Free a 2-D array from rtw_malloc2d(); dims must match the allocation. */
void rtw_mfree2d(void *pbuf, int h, int w, int size)
{
	rtw_mfree((u8 *)pbuf, h * sizeof(void *) + w * h * size);
}
877
/* Free an OS packet (skb on Linux, mbuf chain on FreeBSD). */
inline void rtw_os_pkt_free(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	rtw_skb_free(pkt);
#elif defined(PLATFORM_FREEBSD)
	m_freem(pkt);
#else
	#error "TBD\n"
#endif
}

/* Duplicate an OS packet; returns NULL on failure. */
inline _pkt *rtw_os_pkt_copy(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	return rtw_skb_copy(pkt);
#elif defined(PLATFORM_FREEBSD)
	return m_dup(pkt, M_NOWAIT);
#else
	#error "TBD\n"
#endif
}

/* Pointer to the packet's payload data. */
inline void *rtw_os_pkt_data(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	return pkt->data;
#elif defined(PLATFORM_FREEBSD)
	return pkt->m_data;
#else
	#error "TBD\n"
#endif
}

/* Length in bytes of the packet's payload. */
inline u32 rtw_os_pkt_len(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	return pkt->len;
#elif defined(PLATFORM_FREEBSD)
	return pkt->m_pkthdr.len;
#else
	#error "TBD\n"
#endif
}
921
/* Copy @sz bytes from @src to @dst; regions must not overlap
 * (use _rtw_memmove for overlapping copies). */
void _rtw_memcpy(void *dst, const void *src, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)

	memcpy(dst, src, sz);

#endif

#ifdef PLATFORM_WINDOWS

	NdisMoveMemory(dst, src, sz);

#endif

}
938
/* Copy @sz bytes, handling overlapping regions (Linux only). */
inline void _rtw_memmove(void *dst, const void *src, u32 sz)
{
#if defined(PLATFORM_LINUX)
	memmove(dst, src, sz);
#else
	#error "TBD\n"
#endif
}
947
/* Compare @sz bytes; returns _TRUE when equal, _FALSE when different.
 * NOTE: this is NOT memcmp() ordering semantics - use _rtw_memcmp2 for
 * a signed comparison result.
 * NOTE(review): falls off the end (no return) if no PLATFORM_* macro is
 * defined - presumably one always is; confirm build config.
 */
int _rtw_memcmp(const void *dst, const void *src, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
	/* under Linux/GNU/GLibc, the return value of memcmp for two same mem. chunk is 0 */

	if (!(memcmp(dst, src, sz)))
		return _TRUE;
	else
		return _FALSE;
#endif


#ifdef PLATFORM_WINDOWS
	/* under Windows, the return value of NdisEqualMemory for two same mem. chunk is 1 */

	if (NdisEqualMemory(dst, src, sz))
		return _TRUE;
	else
		return _FALSE;

#endif



}
974
/* memcmp-style byte comparison: returns 0 when the first @sz bytes are
 * equal, otherwise the difference of the first mismatching byte pair. */
int _rtw_memcmp2(const void *dst, const void *src, u32 sz)
{
	const unsigned char *a = dst;
	const unsigned char *b = src;

	for (; sz != 0; sz--, a++, b++) {
		if (*a != *b)
			return *a - *b;
	}

	return 0;
}
992
/* Fill @sz bytes at @pbuf with byte value @c. */
void _rtw_memset(void *pbuf, int c, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)

	memset(pbuf, c, sz);

#endif

#ifdef PLATFORM_WINDOWS
#if 0
	NdisZeroMemory(pbuf, sz);
	if (c != 0)
		memset(pbuf, c, sz);
#else
	NdisFillMemory(pbuf, sz, c);
#endif
#endif

}
1013
1014 #ifdef PLATFORM_FREEBSD
/* Splice @pnew between @pprev and @pnext (doubly-linked list primitive). */
static inline void __list_add(_list *pnew, _list *pprev, _list *pnext)
{
	pnext->prev = pnew;
	pnew->next = pnext;
	pnew->prev = pprev;
	pprev->next = pnew;
}
1022 #endif /* PLATFORM_FREEBSD */
1023
1024
/* Initialize @list as an empty list head (points to itself). */
void _rtw_init_listhead(_list *list)
{

#ifdef PLATFORM_LINUX

	INIT_LIST_HEAD(list);

#endif

#ifdef PLATFORM_FREEBSD
	list->next = list;
	list->prev = list;
#endif
#ifdef PLATFORM_WINDOWS

	NdisInitializeListHead(list);

#endif

}
1045
1046
1047 /*
1048 For the following list_xxx operations,
1049 caller must guarantee the atomic context.
1050 Otherwise, there will be racing condition.
1051 */
/* Return _TRUE when list head @phead has no entries.
 * Caller must hold whatever lock protects the list (see note above).
 */
u32 rtw_is_list_empty(_list *phead)
{

#ifdef PLATFORM_LINUX

	if (list_empty(phead))
		return _TRUE;
	else
		return _FALSE;

#endif
#ifdef PLATFORM_FREEBSD

	if (phead->next == phead)
		return _TRUE;
	else
		return _FALSE;

#endif


#ifdef PLATFORM_WINDOWS

	if (IsListEmpty(phead))
		return _TRUE;
	else
		return _FALSE;

#endif


}
1084
/* Insert @plist right after list head @phead (LIFO position).
 * Caller must guarantee atomic context (see note above).
 */
void rtw_list_insert_head(_list *plist, _list *phead)
{

#ifdef PLATFORM_LINUX
	list_add(plist, phead);
#endif

#ifdef PLATFORM_FREEBSD
	__list_add(plist, phead, phead->next);
#endif

#ifdef PLATFORM_WINDOWS
	InsertHeadList(phead, plist);
#endif
}

/* Insert @plist before list head @phead, i.e. at the tail (FIFO position).
 * Caller must guarantee atomic context (see note above).
 */
void rtw_list_insert_tail(_list *plist, _list *phead)
{

#ifdef PLATFORM_LINUX

	list_add_tail(plist, phead);

#endif
#ifdef PLATFORM_FREEBSD

	__list_add(plist, phead->prev, phead);

#endif
#ifdef PLATFORM_WINDOWS

	InsertTailList(phead, plist);

#endif

}
1121
/* Join @list onto the front of @head; @list is left in an undefined
 * state (use rtw_list_splice_init to reuse it). */
inline void rtw_list_splice(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice(list, head);
#else
	#error "TBD\n"
#endif
}

/* Join @list onto the front of @head and reinitialize @list as empty. */
inline void rtw_list_splice_init(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice_init(list, head);
#else
	#error "TBD\n"
#endif
}

/* Join @list onto the tail of @head.
 * Kernels before 2.6.27 have no list_splice_tail; fall back to the
 * internal __list_splice (which splices at the given position).
 */
inline void rtw_list_splice_tail(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
	if (!list_empty(list))
		__list_splice(list, head);
#else
	list_splice_tail(list, head);
#endif
#else
	#error "TBD\n"
#endif
}
1153
rtw_hlist_head_init(rtw_hlist_head * h)1154 inline void rtw_hlist_head_init(rtw_hlist_head *h)
1155 {
1156 #ifdef PLATFORM_LINUX
1157 INIT_HLIST_HEAD(h);
1158 #else
1159 #error "TBD\n"
1160 #endif
1161 }
1162
rtw_hlist_add_head(rtw_hlist_node * n,rtw_hlist_head * h)1163 inline void rtw_hlist_add_head(rtw_hlist_node *n, rtw_hlist_head *h)
1164 {
1165 #ifdef PLATFORM_LINUX
1166 hlist_add_head(n, h);
1167 #else
1168 #error "TBD\n"
1169 #endif
1170 }
1171
/* Remove node @n from its hash list. */
inline void rtw_hlist_del(rtw_hlist_node *n)
{
#ifdef PLATFORM_LINUX
	hlist_del(n);
#else
#error "TBD\n"
#endif
}
1180
/* RCU-safe insertion of node @n at the head of hash list @h. */
inline void rtw_hlist_add_head_rcu(rtw_hlist_node *n, rtw_hlist_head *h)
{
#ifdef PLATFORM_LINUX
	hlist_add_head_rcu(n, h);
#else
#error "TBD\n"
#endif
}
1189
/* RCU-safe removal of node @n from its hash list. */
inline void rtw_hlist_del_rcu(rtw_hlist_node *n)
{
#ifdef PLATFORM_LINUX
	hlist_del_rcu(n);
#else
#error "TBD\n"
#endif
}
1198
/*
 * Initialise @ptimer to invoke @pfunc with argument @ctx on expiry.
 * @padapter must point to an _adapter; only its per-platform device
 * handle is forwarded to the underlying _init_timer().
 */
void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc, void *ctx)
{
	_adapter *adapter = (_adapter *)padapter;

#ifdef PLATFORM_LINUX
	_init_timer(ptimer, adapter->pnetdev, pfunc, ctx);
#endif
#ifdef PLATFORM_FREEBSD
	_init_timer(ptimer, adapter->pifp, pfunc, ctx);
#endif
#ifdef PLATFORM_WINDOWS
	_init_timer(ptimer, adapter->hndis_adapter, pfunc, ctx);
#endif
}
1213
1214 /*
1215
1216 Caller must check if the list is empty before calling rtw_list_delete
1217
1218 */
1219
1220
/* Initialise counting semaphore @sema with initial count @init_val. */
void _rtw_init_sema(_sema *sema, int init_val)
{

#ifdef PLATFORM_LINUX

	sema_init(sema, init_val);

#endif
#ifdef PLATFORM_FREEBSD
	sema_init(sema, init_val, "rtw_drv");
#endif
#ifdef PLATFORM_OS_XP

	KeInitializeSemaphore(sema, init_val, SEMA_UPBND); /* count=0; */

#endif

#ifdef PLATFORM_OS_CE
	/* create only once; an already-created handle is kept as-is */
	if (*sema == NULL)
		*sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL);
#endif

}
1244
/* Release resources held by @sema (no-op on Linux/XP, which need no teardown). */
void _rtw_free_sema(_sema *sema)
{
#ifdef PLATFORM_FREEBSD
	sema_destroy(sema);
#endif
#ifdef PLATFORM_OS_CE
	CloseHandle(*sema);
#endif

}
1255
/* Post (release) one count on @sema. */
void _rtw_up_sema(_sema *sema)
{

#ifdef PLATFORM_LINUX

	up(sema);

#endif
#ifdef PLATFORM_FREEBSD
	sema_post(sema);
#endif
#ifdef PLATFORM_OS_XP

	KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1, FALSE);

#endif

#ifdef PLATFORM_OS_CE
	ReleaseSemaphore(*sema, 1, NULL);
#endif
}
1277
/*
 * Block until one count of @sema can be taken.
 * @return _SUCCESS when acquired; _FAIL if the wait was interrupted
 * (e.g. a fatal signal on Linux, alerted wait on XP).
 */
u32 _rtw_down_sema(_sema *sema)
{

#ifdef PLATFORM_LINUX

	if (down_killable(sema))
		return _FAIL;
	else
		return _SUCCESS;

#endif
#ifdef PLATFORM_FREEBSD
	sema_wait(sema);
	return _SUCCESS;
#endif
#ifdef PLATFORM_OS_XP

	if (STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL))
		return _SUCCESS;
	else
		return _FAIL;
#endif

#ifdef PLATFORM_OS_CE
	if (WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE))
		return _SUCCESS;
	else
		return _FAIL;
#endif
}
1308
/*
 * Terminate the calling driver thread, signalling @comp to the waiter
 * first where supported. Does not return on Linux/XP/CE.
 */
inline void thread_exit(_completion *comp)
{
#ifdef PLATFORM_LINUX
	/* complete_and_exit() was renamed kthread_complete_and_exit() in
	 * Linux 5.17 and the old symbol removed; pick by kernel version,
	 * matching the version gating used elsewhere in this file. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
	kthread_complete_and_exit(comp, 0);
#else
	complete_and_exit(comp, 0);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	printf("%s", "RTKTHREAD_exit");
#endif

#ifdef PLATFORM_OS_CE
	ExitThread(STATUS_SUCCESS);
#endif

#ifdef PLATFORM_OS_XP
	PsTerminateSystemThread(STATUS_SUCCESS);
#endif
}
1327
/* Initialise completion object @comp to the not-done state. */
inline void _rtw_init_completion(_completion *comp)
{
#ifdef PLATFORM_LINUX
	init_completion(comp);
#endif
}
/* Wait for @comp to complete, giving up after a fixed 3-second timeout. */
inline void _rtw_wait_for_comp_timeout(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion_timeout(comp, msecs_to_jiffies(3000));
#endif
}
/* Wait (uninterruptibly, no timeout) for @comp to complete. */
inline void _rtw_wait_for_comp(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion(comp);
#endif
}
1346
/* Initialise mutex @pmutex (recursive mtx on FreeBSD, KMUTEX on XP). */
void _rtw_mutex_init(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX

	/* init_MUTEX() was removed when semaphore-backed mutexes went away */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_init(pmutex);
#else
	init_MUTEX(pmutex);
#endif

#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(pmutex, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_OS_XP

	KeInitializeMutex(pmutex, 0);

#endif

#ifdef PLATFORM_OS_CE
	*pmutex = CreateMutex(NULL, _FALSE, NULL);
#endif
}
1371
void _rtw_mutex_free(_mutex *pmutex);
/* Destroy mutex @pmutex initialised by _rtw_mutex_init(). */
void _rtw_mutex_free(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#endif

#endif

	/* BUGFIX: this branch used to be nested inside #ifdef PLATFORM_LINUX
	 * and could never be compiled in; it also called sema_destroy() on an
	 * object initialised with mtx_init() — use the matching mtx_destroy(). */
#ifdef PLATFORM_FREEBSD
	mtx_destroy(pmutex);
#endif

#ifdef PLATFORM_OS_XP
	/* KMUTEX requires no explicit destruction */
#endif

#ifdef PLATFORM_OS_CE
	/* mutex handle lifetime is managed elsewhere; nothing to do */
#endif
}
1396
/* Initialise spinlock @plock (recursive mtx on FreeBSD, NDIS lock on Windows). */
void _rtw_spinlock_init(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_lock_init(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(plock, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAllocateSpinLock(plock);

#endif

}
1415
/* Destroy spinlock @plock (no-op on Linux, where spinlocks need no teardown). */
void _rtw_spinlock_free(_lock *plock)
{
#ifdef PLATFORM_FREEBSD
	mtx_destroy(plock);
#endif

#ifdef PLATFORM_WINDOWS

	NdisFreeSpinLock(plock);

#endif

}
1429 #ifdef PLATFORM_FREEBSD
1430 extern PADAPTER prtw_lock;
1431
/* Take the adapter-global FreeBSD lock; @plock is ignored — the global
 * prtw_lock->glock is used instead. Logs if the global is not yet set. */
void rtw_mtx_lock(_lock *plock)
{
	if (prtw_lock)
		mtx_lock(&prtw_lock->glock);
	else
		printf("%s prtw_lock==NULL", __FUNCTION__);
}
/* Release the adapter-global FreeBSD lock taken by rtw_mtx_lock();
 * @plock is ignored for the same reason. */
void rtw_mtx_unlock(_lock *plock)
{
	if (prtw_lock)
		mtx_unlock(&prtw_lock->glock);
	else
		printf("%s prtw_lock==NULL", __FUNCTION__);

}
1447 #endif /* PLATFORM_FREEBSD */
1448
1449
/* Acquire spinlock @plock (no IRQ-save variant; caller manages context). */
void _rtw_spinlock(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_lock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_lock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAcquireSpinLock(plock);

#endif

}
1468
/* Release spinlock @plock acquired via _rtw_spinlock(). */
void _rtw_spinunlock(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_unlock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_unlock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisReleaseSpinLock(plock);

#endif
}
1486
1487
/* Acquire @plock; on Windows uses the DISPATCH_LEVEL ("Dpr") NDIS variant,
 * identical to _rtw_spinlock() on Linux/FreeBSD. */
void _rtw_spinlock_ex(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_lock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_lock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisDprAcquireSpinLock(plock);

#endif

}
1506
/* Release @plock taken with _rtw_spinlock_ex() (Dpr variant on Windows). */
void _rtw_spinunlock_ex(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_unlock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_unlock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisDprReleaseSpinLock(plock);

#endif
}
1524
1525
1526
/* Initialise @pqueue: empty list head plus its protecting spinlock. */
void _rtw_init_queue(_queue *pqueue)
{
	_rtw_init_listhead(&(pqueue->queue));
	_rtw_spinlock_init(&(pqueue->lock));
}
1532
/* Tear down @pqueue's lock; the list itself needs no teardown. */
void _rtw_deinit_queue(_queue *pqueue)
{
	_rtw_spinlock_free(&(pqueue->lock));
}
1537
/* Return whether @pqueue holds no entries (caller handles any locking). */
u32 _rtw_queue_empty(_queue *pqueue)
{
	return rtw_is_list_empty(&(pqueue->queue));
}
1542
1543
rtw_end_of_queue_search(_list * head,_list * plist)1544 u32 rtw_end_of_queue_search(_list *head, _list *plist)
1545 {
1546 if (head == plist)
1547 return _TRUE;
1548 else
1549 return _FALSE;
1550 }
1551
1552
/*
 * Return the current monotonic-ish timestamp in platform-native units:
 * jiffies (Linux), whole seconds (FreeBSD), or 100 ns ticks (Windows).
 * Only compare/convert via the _rtw_systime_* helpers.
 */
systime _rtw_get_current_time(void)
{

#ifdef PLATFORM_LINUX
	return jiffies;
#endif
#ifdef PLATFORM_FREEBSD
	struct timeval tvp;
	getmicrotime(&tvp);
	return tvp.tv_sec;
#endif
#ifdef PLATFORM_WINDOWS
	LARGE_INTEGER SystemTime;
	NdisGetCurrentSystemTime(&SystemTime);
	return SystemTime.LowPart;/* count of 100-nanosecond intervals */
#endif
}
1570
/* Convert platform-native timestamp @stime to milliseconds. */
inline u32 _rtw_systime_to_ms(systime stime)
{
#ifdef PLATFORM_LINUX
	return jiffies_to_msecs(stime);
#endif
#ifdef PLATFORM_FREEBSD
	return stime * 1000;
#endif
#ifdef PLATFORM_WINDOWS
	return stime / 10000 ;
#endif
}
1583
/* Convert @ms milliseconds to the platform-native timestamp unit. */
inline systime _rtw_ms_to_systime(u32 ms)
{
#ifdef PLATFORM_LINUX
	return msecs_to_jiffies(ms);
#endif
#ifdef PLATFORM_FREEBSD
	return ms / 1000;
#endif
#ifdef PLATFORM_WINDOWS
	return ms * 10000 ;
#endif
}
1596
/* Convert @us microseconds to the platform-native timestamp unit (Linux only). */
inline systime _rtw_us_to_systime(u32 us)
{
#ifdef PLATFORM_LINUX
	return usecs_to_jiffies(us);
#else
#error "TBD\n"
#endif
}
1605
1606 /* the input parameter start use the same unit as returned by rtw_get_current_time */
_rtw_get_passing_time_ms(systime start)1607 inline s32 _rtw_get_passing_time_ms(systime start)
1608 {
1609 return _rtw_systime_to_ms(_rtw_get_current_time() - start);
1610 }
1611
_rtw_get_remaining_time_ms(systime end)1612 inline s32 _rtw_get_remaining_time_ms(systime end)
1613 {
1614 return _rtw_systime_to_ms(end - _rtw_get_current_time());
1615 }
1616
_rtw_get_time_interval_ms(systime start,systime end)1617 inline s32 _rtw_get_time_interval_ms(systime start, systime end)
1618 {
1619 return _rtw_systime_to_ms(end - start);
1620 }
1621
/* Wrap-safe "is @a later than @b" comparison for systime values. */
inline bool _rtw_time_after(systime a, systime b)
{
#ifdef PLATFORM_LINUX
	return time_after(a, b);
#else
#error "TBD\n"
#endif
}
1630
/* Return the current CLOCK_MONOTONIC time as a ktime-based sysptime. */
sysptime rtw_sptime_get(void)
{
	/* CLOCK_MONOTONIC */
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
	struct timespec64 cur;

	ktime_get_ts64(&cur);
	return timespec64_to_ktime(cur);
#else
	struct timespec cur;

	ktime_get_ts(&cur);
	return timespec_to_ktime(cur);
#endif
#else
#error "TBD\n"
#endif
}
1650
/* Build a sysptime from @secs seconds plus @nsecs nanoseconds. */
sysptime rtw_sptime_set(s64 secs, const u32 nsecs)
{
#ifdef PLATFORM_LINUX
	return ktime_set(secs, nsecs);
#else
#error "TBD\n"
#endif
}
1659
/* Return the zero sysptime value. */
sysptime rtw_sptime_zero(void)
{
#ifdef PLATFORM_LINUX
	return ktime_set(0, 0);
#else
#error "TBD\n"
#endif
}
1668
1669 /*
1670 * cmp1 < cmp2: return <0
1671 * cmp1 == cmp2: return 0
1672 * cmp1 > cmp2: return >0
1673 */
/* Three-way comparison of two sysptime values (see contract comment above). */
int rtw_sptime_cmp(const sysptime cmp1, const sysptime cmp2)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	return ktime_compare(cmp1, cmp2);
#else
	/* pre-3.8 kernels lack ktime_compare(); compare the raw 64-bit value */
	if (cmp1.tv64 < cmp2.tv64)
		return -1;
	if (cmp1.tv64 > cmp2.tv64)
		return 1;
	return 0;
#endif
#else
#error "TBD\n"
#endif
}
1690
/* Return true when @cmp1 and @cmp2 denote the same instant. */
bool rtw_sptime_eql(const sysptime cmp1, const sysptime cmp2)
{
#ifdef PLATFORM_LINUX
	return rtw_sptime_cmp(cmp1, cmp2) == 0;
#else
#error "TBD\n"
#endif
}
1699
/* Return true when @sptime equals the zero sysptime. */
bool rtw_sptime_is_zero(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
	return rtw_sptime_cmp(sptime, rtw_sptime_zero()) == 0;
#else
#error "TBD\n"
#endif
}
1708
1709 /*
1710 * sub = lhs - rhs, in normalized form
1711 */
/* Return @lhs - @rhs in normalized ktime form. */
sysptime rtw_sptime_sub(const sysptime lhs, const sysptime rhs)
{
#ifdef PLATFORM_LINUX
	return ktime_sub(lhs, rhs);
#else
#error "TBD\n"
#endif
}
1720
1721 /*
1722 * add = lhs + rhs, in normalized form
1723 */
/* Return @lhs + @rhs in normalized ktime form. */
sysptime rtw_sptime_add(const sysptime lhs, const sysptime rhs)
{
#ifdef PLATFORM_LINUX
	return ktime_add(lhs, rhs);
#else
#error "TBD\n"
#endif
}
1732
/* Convert @sptime to whole milliseconds. */
s64 rtw_sptime_to_ms(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return ktime_to_ms(sptime);
#else
	/* pre-2.6.35 kernels lack ktime_to_ms(); go via timeval */
	struct timeval tv = ktime_to_timeval(sptime);

	return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
#endif
#else
#error "TBD\n"
#endif
}
1747
/* Build a sysptime representing @ms milliseconds. */
sysptime rtw_ms_to_sptime(u64 ms)
{
#ifdef PLATFORM_LINUX
	return ns_to_ktime(ms * NSEC_PER_MSEC);
#else
#error "TBD\n"
#endif
}
1756
/* Convert @sptime to whole microseconds. */
s64 rtw_sptime_to_us(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	return ktime_to_us(sptime);
#else
	/* pre-2.6.22 kernels lack ktime_to_us(); go via timeval */
	struct timeval tv = ktime_to_timeval(sptime);

	return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
#endif
#else
#error "TBD\n"
#endif
}
1771
/* Build a sysptime representing @us microseconds. */
sysptime rtw_us_to_sptime(u64 us)
{
#ifdef PLATFORM_LINUX
	return ns_to_ktime(us * NSEC_PER_USEC);
#else
#error "TBD\n"
#endif
}
1780
/* Convert @sptime to nanoseconds. */
s64 rtw_sptime_to_ns(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
	return ktime_to_ns(sptime);
#else
#error "TBD\n"
#endif
}
1789
/* Build a sysptime representing @ns nanoseconds. */
sysptime rtw_ns_to_sptime(u64 ns)
{
#ifdef PLATFORM_LINUX
	return ns_to_ktime(ns);
#else
#error "TBD\n"
#endif
}
1798
rtw_sptime_diff_ms(const sysptime start,const sysptime end)1799 s64 rtw_sptime_diff_ms(const sysptime start, const sysptime end)
1800 {
1801 sysptime diff;
1802
1803 diff = rtw_sptime_sub(end, start);
1804
1805 return rtw_sptime_to_ms(diff);
1806 }
1807
rtw_sptime_pass_ms(const sysptime start)1808 s64 rtw_sptime_pass_ms(const sysptime start)
1809 {
1810 sysptime cur, diff;
1811
1812 cur = rtw_sptime_get();
1813 diff = rtw_sptime_sub(cur, start);
1814
1815 return rtw_sptime_to_ms(diff);
1816 }
1817
rtw_sptime_diff_us(const sysptime start,const sysptime end)1818 s64 rtw_sptime_diff_us(const sysptime start, const sysptime end)
1819 {
1820 sysptime diff;
1821
1822 diff = rtw_sptime_sub(end, start);
1823
1824 return rtw_sptime_to_us(diff);
1825 }
1826
rtw_sptime_pass_us(const sysptime start)1827 s64 rtw_sptime_pass_us(const sysptime start)
1828 {
1829 sysptime cur, diff;
1830
1831 cur = rtw_sptime_get();
1832 diff = rtw_sptime_sub(cur, start);
1833
1834 return rtw_sptime_to_us(diff);
1835 }
1836
rtw_sptime_diff_ns(const sysptime start,const sysptime end)1837 s64 rtw_sptime_diff_ns(const sysptime start, const sysptime end)
1838 {
1839 sysptime diff;
1840
1841 diff = rtw_sptime_sub(end, start);
1842
1843 return rtw_sptime_to_ns(diff);
1844 }
1845
rtw_sptime_pass_ns(const sysptime start)1846 s64 rtw_sptime_pass_ns(const sysptime start)
1847 {
1848 sysptime cur, diff;
1849
1850 cur = rtw_sptime_get();
1851 diff = rtw_sptime_sub(cur, start);
1852
1853 return rtw_sptime_to_ns(diff);
1854 }
1855
/*
 * Sleep for at least @ms milliseconds while staying schedulable
 * (TASK_INTERRUPTIBLE on Linux); requests shorter than one tick are
 * rounded up to a single jiffy.
 */
void rtw_sleep_schedulable(int ms)
{

#ifdef PLATFORM_LINUX

	u32 ticks = (ms * HZ) / 1000; /* (ms) */

	if (!ticks)
		ticks = 1;
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(ticks);
	return;

#endif
#ifdef PLATFORM_FREEBSD
	DELAY(ms * 1000);
	return ;
#endif

#ifdef PLATFORM_WINDOWS

	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */

#endif

}
1884
1885
/*
 * Sleep for @ms milliseconds. On Linux >= 2.6.36, short sleeps (< 20 ms)
 * use usleep_range() for finer-grained wakeups than msleep() provides.
 */
void rtw_msleep_os(int ms)
{

#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	if (ms < 20) {
		unsigned long us = ms * 1000UL;
		usleep_range(us, us + 1000UL);
	} else
#endif
		msleep((unsigned int)ms);

#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/* Sleep for roughly @us microseconds (best-effort granularity per platform). */
void rtw_usleep_os(int us)
{
#ifdef PLATFORM_LINUX

	/* msleep((unsigned int)us); */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	usleep_range(us, us + 1);
#else
	/* BUGFIX: the old fallback compared the wrong way around and slept
	 * only 1 ms for any request longer than 1 ms; round the request up
	 * to whole milliseconds instead (minimum 1 ms). */
	msleep((us + 999) / 1000);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(us);

	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisMSleep(us); /* (us) */

#endif


}
1941
1942
1943 #ifdef DBG_DELAY_OS
/* Debug build: busy-wait @ms milliseconds, logging caller @func:@line. */
void _rtw_mdelay_os(int ms, const char *func, const int line)
{
#if 0
	if (ms > 10)
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
	rtw_msleep_os(ms);
	return;
#endif


	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);

#if defined(PLATFORM_LINUX)

	mdelay((unsigned long)ms);

#elif defined(PLATFORM_WINDOWS)

	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/* Debug build: busy-wait @us microseconds, logging caller @func:@line. */
void _rtw_udelay_os(int us, const char *func, const int line)
{

#if 0
	if (us > 1000) {
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
		rtw_usleep_os(us);
		return;
	}
#endif


	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);


#if defined(PLATFORM_LINUX)

	udelay((unsigned long)us);

#elif defined(PLATFORM_WINDOWS)

	NdisStallExecution(us); /* (us) */

#endif

}
1994 #else
/* Busy-wait (no scheduling) for @ms milliseconds. */
void rtw_mdelay_os(int ms)
{

#ifdef PLATFORM_LINUX

	mdelay((unsigned long)ms);

#endif
#ifdef PLATFORM_FREEBSD
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/* Busy-wait (no scheduling) for @us microseconds. */
void rtw_udelay_os(int us)
{

#ifdef PLATFORM_LINUX

	udelay((unsigned long)us);

#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(us);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisStallExecution(us); /* (us) */

#endif

}
2035 #endif
2036
/* Voluntarily yield the CPU to other runnable threads. */
void rtw_yield_os(void)
{
#ifdef PLATFORM_LINUX
	yield();
#endif
#ifdef PLATFORM_FREEBSD
	yield();
#endif
#ifdef PLATFORM_WINDOWS
	SwitchToThread();
#endif
}
2049
/* Human-readable names for enum rtw_pwait_type, indexed by type value;
 * the RTW_PWAIT_TYPE_NUM slot is the out-of-range fallback. */
const char *_rtw_pwait_type_str[] = {
	[RTW_PWAIT_TYPE_MSLEEP] = "MS",
	[RTW_PWAIT_TYPE_USLEEP] = "US",
	[RTW_PWAIT_TYPE_YIELD] = "Y",
	[RTW_PWAIT_TYPE_MDELAY] = "MD",
	[RTW_PWAIT_TYPE_UDELAY] = "UD",
	[RTW_PWAIT_TYPE_NUM] = "unknown",
};
2058
/* Adapter so rtw_yield_os() fits the (int) wait-handler signature; @us is ignored. */
static void rtw_pwctx_yield(int us)
{
	rtw_yield_os();
}
2063
/* Polling-wait handlers indexed by enum rtw_pwait_type; each takes the
 * configured wait time (ms or us depending on the type). */
static void (*const rtw_pwait_hdl[])(int)= {
	[RTW_PWAIT_TYPE_MSLEEP] = rtw_msleep_os,
	[RTW_PWAIT_TYPE_USLEEP] = rtw_usleep_os,
	[RTW_PWAIT_TYPE_YIELD] = rtw_pwctx_yield,
	[RTW_PWAIT_TYPE_MDELAY] = rtw_mdelay_os,
	[RTW_PWAIT_TYPE_UDELAY] = rtw_udelay_os,
};
2071
/*
 * Configure polling-wait context @pwctx: wait strategy @type, per-wait
 * @time and iteration limit @cnt_lmt.
 * @return _SUCCESS, or _FAIL when @type is out of range.
 */
int rtw_pwctx_config(struct rtw_pwait_ctx *pwctx, enum rtw_pwait_type type, s32 time, s32 cnt_lmt)
{
	if (!RTW_PWAIT_TYPE_VALID(type))
		return _FAIL;

	pwctx->conf.type = type;
	pwctx->conf.wait_time = time;
	pwctx->conf.wait_cnt_lmt = cnt_lmt;
	pwctx->wait_hdl = rtw_pwait_hdl[type];

	return _SUCCESS;
}
2089
rtw_macaddr_is_larger(const u8 * a,const u8 * b)2090 bool rtw_macaddr_is_larger(const u8 *a, const u8 *b)
2091 {
2092 u32 va, vb;
2093
2094 va = be32_to_cpu(*((u32 *)a));
2095 vb = be32_to_cpu(*((u32 *)b));
2096 if (va > vb)
2097 return 1;
2098 else if (va < vb)
2099 return 0;
2100
2101 return be16_to_cpu(*((u16 *)(a + 4))) > be16_to_cpu(*((u16 *)(b + 4)));
2102 }
2103
/* Names and storage for the three suspend wake locks (system, traffic,
 * resume), backed by Android wakelocks or android_suspend_lock. */
#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
#define RTW_SUSPEND_TRAFFIC_LOCK_NAME "rtw_wifi_traffic"
#define RTW_SUSPEND_RESUME_LOCK_NAME "rtw_wifi_resume"
#ifdef CONFIG_WAKELOCK
static struct wake_lock rtw_suspend_lock;
static struct wake_lock rtw_suspend_traffic_lock;
static struct wake_lock rtw_suspend_resume_lock;
#elif defined(CONFIG_ANDROID_POWER)
static android_suspend_lock_t rtw_suspend_lock = {
	.name = RTW_SUSPEND_LOCK_NAME
};
static android_suspend_lock_t rtw_suspend_traffic_lock = {
	.name = RTW_SUSPEND_TRAFFIC_LOCK_NAME
};
static android_suspend_lock_t rtw_suspend_resume_lock = {
	.name = RTW_SUSPEND_RESUME_LOCK_NAME
};
#endif
2122
/* Register the three driver suspend wake locks with the platform. */
inline void rtw_suspend_lock_init(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME);
	wake_lock_init(&rtw_suspend_traffic_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_TRAFFIC_LOCK_NAME);
	wake_lock_init(&rtw_suspend_resume_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RESUME_LOCK_NAME);
#elif defined(CONFIG_ANDROID_POWER)
	android_init_suspend_lock(&rtw_suspend_lock);
	android_init_suspend_lock(&rtw_suspend_traffic_lock);
	android_init_suspend_lock(&rtw_suspend_resume_lock);
#endif
}
2135
/* Unregister the suspend wake locks created by rtw_suspend_lock_init(). */
inline void rtw_suspend_lock_uninit(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_destroy(&rtw_suspend_lock);
	wake_lock_destroy(&rtw_suspend_traffic_lock);
	wake_lock_destroy(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_uninit_suspend_lock(&rtw_suspend_lock);
	android_uninit_suspend_lock(&rtw_suspend_traffic_lock);
	android_uninit_suspend_lock(&rtw_suspend_resume_lock);
#endif
}
2148
/* Hold the main suspend lock, preventing system suspend while held. */
inline void rtw_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2161
/* Drop the main suspend lock taken by rtw_lock_suspend(). */
inline void rtw_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2174
/* Hold the resume-path suspend lock (blocks suspend during driver resume). */
inline void rtw_resume_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_resume_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2187
/* Drop the resume-path suspend lock taken by rtw_resume_lock_suspend(). */
inline void rtw_resume_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_resume_lock);
#endif

#if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2200
/* Hold the main suspend lock with auto-expiry after @timeout_ms. */
inline void rtw_lock_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#endif
}
2209
2210
/* Hold the traffic suspend lock with auto-expiry after @timeout_ms. */
inline void rtw_lock_traffic_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#endif
	/* RTW_INFO("traffic lock timeout:%d\n", timeout_ms); */
}
2220
/* Atomically set bit @nr in the bitmap at @addr. */
inline void rtw_set_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	set_bit(nr, addr);
#else
#error "TBD\n";
#endif
}
2229
/* Atomically clear bit @nr in the bitmap at @addr. */
inline void rtw_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	clear_bit(nr, addr);
#else
#error "TBD\n";
#endif
}
2238
/* Atomically clear bit @nr at @addr and return its previous value. */
inline int rtw_test_and_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	return test_and_clear_bit(nr, addr);
#else
#error "TBD\n";
#endif
}
2247
/* Store @i into atomic variable @v. */
inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_set(v, i);
#elif defined(PLATFORM_WINDOWS)
	*v = i; /* other choice???? */
#elif defined(PLATFORM_FREEBSD)
	atomic_set_int(v, i);
#endif
}
2258
/* Return the current value of atomic variable @v. */
inline int ATOMIC_READ(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_read(v);
#elif defined(PLATFORM_WINDOWS)
	return *v; /* other choice???? */
#elif defined(PLATFORM_FREEBSD)
	return atomic_load_acq_32(v);
#endif
}
2269
/* Atomically add @i to @v. */
inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_add(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, i);
#endif
}
/* Atomically subtract @i from @v. */
inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_sub(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, i);
#endif
}
2290
/* Atomically increment @v by one. */
inline void ATOMIC_INC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_inc(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, 1);
#endif
}
2301
/* Atomically decrement @v by one. */
inline void ATOMIC_DEC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_dec(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, 1);
#endif
}
2312
/* Atomically add @i to @v and return the new value.
 * NOTE(review): the FreeBSD path is two separate operations — the add and
 * the read-back are not one atomic step, so a concurrent update can make
 * the returned value differ from (old + i); confirm whether callers care. */
inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_add_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, i);
	return atomic_load_acq_32(v);
#endif
}
2324
/* Atomically subtract @i from @v and return the new value.
 * NOTE(review): FreeBSD path is not a single atomic op (see ATOMIC_ADD_RETURN). */
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_sub_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, i);
	return atomic_load_acq_32(v);
#endif
}
2336
/* Atomically increment @v and return the new value.
 * NOTE(review): FreeBSD path is not a single atomic op (see ATOMIC_ADD_RETURN). */
inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_inc_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}
2348
/* Atomically decrement @v and return the new value.
 * NOTE(review): FreeBSD path is not a single atomic op (see ATOMIC_ADD_RETURN). */
inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_dec_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}
2360
/*
 * Increment @v unless it already equals @u.
 * @return non-zero when the increment happened, 0 otherwise.
 */
inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
	return atomic_add_unless(v, 1, u);
#else
	/* only make sure not exceed after this function */
	if (ATOMIC_INC_RETURN(v) > u) {
		ATOMIC_DEC(v);
		return 0;
	}
	return 1;
#endif
#else
#error "TBD\n"
#endif
}
2378
2379 #ifdef PLATFORM_LINUX
2380 /*
2381 * Open a file with the specific @param path, @param flag, @param mode
2382 * @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
2383 * @param path the path of the file to open
2384 * @param flag file operation flags, please refer to linux document
2385 * @param mode please refer to linux document
2386 * @return Linux specific error code
2387 */
openFile(struct file ** fpp,const char * path,int flag,int mode)2388 static int openFile(struct file **fpp, const char *path, int flag, int mode)
2389 {
2390 struct file *fp;
2391
2392 fp = filp_open(path, flag, mode);
2393 if (IS_ERR(fp)) {
2394 *fpp = NULL;
2395 return PTR_ERR(fp);
2396 } else {
2397 *fpp = fp;
2398 return 0;
2399 }
2400 }
2401
2402 /*
2403 * Close the file with the specific @param fp
2404 * @param fp the pointer of struct file to close
2405 * @return always 0
2406 */
static int closeFile(struct file *fp)
{
	/* filp_close() result is intentionally discarded; callers only need 0 */
	filp_close(fp, NULL);
	return 0;
}
2412
/*
 * Read up to @len bytes from @fp into @buf, looping over short reads and
 * advancing fp->f_pos. The read API used depends on the kernel version.
 * @return bytes read (may be < @len at EOF), -EPERM if the file is not
 * readable, or the negative error from the first failing read.
 */
static int readFile(struct file *fp, char *buf, int len)
{
	int rlen = 0, sum = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_READ))
#else
	if (!fp->f_op || !fp->f_op->read)
#endif
		return -EPERM;

	while (sum < len) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		rlen = __vfs_read(fp, buf + sum, len - sum, &fp->f_pos);
#else
		rlen = fp->f_op->read(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (rlen > 0)
			sum += rlen;
		else if (0 != rlen)
			return rlen;	/* propagate negative error codes */
		else
			break;		/* rlen == 0: EOF */
	}

	return sum;

}
2443
/*
 * Write up to @len bytes from @buf to @fp, looping over short writes and
 * advancing fp->f_pos. The write API used depends on the kernel version.
 * @return bytes written, -EPERM if the file is not writable, or the
 * negative error from the first failing write.
 */
static int writeFile(struct file *fp, char *buf, int len)
{
	int wlen = 0, sum = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_WRITE))
#else
	if (!fp->f_op || !fp->f_op->write)
#endif
		return -EPERM;

	while (sum < len) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		wlen = kernel_write(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		wlen = __vfs_write(fp, buf + sum, len - sum, &fp->f_pos);
#else
		wlen = fp->f_op->write(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (wlen > 0)
			sum += wlen;
		else if (0 != wlen)
			return wlen;	/* propagate negative error codes */
		else
			break;		/* wlen == 0: cannot make progress */
	}

	return sum;

}
2474
/*
 * Test if the specified @param pathname is a directory and is readable
 * If readable, @param sz is not used
 * @param pathname the name of the path to test
 * @return Linux specific error code
 */
/*
 * Test whether @pathname can be resolved (directory lookup succeeds).
 * @pathname the name of the path to test
 * @sz unused for directories
 * @return 0 on success, otherwise a negative Linux error code
 */
static int isDirReadable(const char *pathname, u32 *sz)
{
	struct path path;
	int error;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	/* fix: kern_path() takes a reference on success which the old code
	 * never dropped, leaking dentry/vfsmount refcounts on every call */
	if (error == 0)
		path_put(&path);

	return error;
}
2488
/*
 * Test if the specified @param path is a regular file and is readable
 * If readable, @param sz receives the file size
 * @param path the path of the file to test
 * @return Linux specific error code
 */
isFileReadable(const char * path,u32 * sz)2495 static int isFileReadable(const char *path, u32 *sz)
2496 {
2497 struct file *fp;
2498 int ret = 0;
2499 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2500 mm_segment_t oldfs;
2501 #endif
2502 char buf;
2503
2504 fp = filp_open(path, O_RDONLY, 0);
2505 if (IS_ERR(fp))
2506 ret = PTR_ERR(fp);
2507 else {
2508 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2509 oldfs = get_fs();
2510 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2511 set_fs(KERNEL_DS);
2512 #else
2513 set_fs(get_ds());
2514 #endif
2515 #endif
2516
2517 if (1 != readFile(fp, &buf, 1))
2518 ret = PTR_ERR(fp);
2519
2520 if (ret == 0 && sz) {
2521 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
2522 *sz = i_size_read(fp->f_path.dentry->d_inode);
2523 #else
2524 *sz = i_size_read(fp->f_dentry->d_inode);
2525 #endif
2526 }
2527
2528 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2529 set_fs(oldfs);
2530 #endif
2531 filp_close(fp, NULL);
2532 }
2533 return ret;
2534 }
2535
/*
 * Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
 * @param path the path of the file to open and read
 * @param buf the starting address of the buffer to store file content
 * @param sz how many bytes to read at most
 * @return the bytes we've read, or Linux specific error code
 */
/* Read at most @sz bytes of the file at @path into @buf.
 * @return bytes read, or a negative Linux error code */
static int retriveFromFile(const char *path, u8 *buf, u32 sz)
{
	int ret = -1;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t oldfs;
#endif
	struct file *fp;

	if (path && buf) {
		ret = openFile(&fp, path, O_RDONLY, 0);
		if (0 == ret) {
			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);

			/* pre-5.10 kernels need KERNEL_DS so VFS accepts a
			 * kernel-space buffer */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			oldfs = get_fs();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
			set_fs(KERNEL_DS);
#else
			set_fs(get_ds());
#endif
#endif

			ret = readFile(fp, buf, sz);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			set_fs(oldfs);
#endif
			closeFile(fp);

			RTW_INFO("%s readFile, ret:%d\n", __FUNCTION__, ret);

		} else
			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
	} else {
		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
		ret = -EINVAL;
	}
	return ret;
}
2582
/*
 * Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
 * @param path the path of the file to open and write
 * @param buf the starting address of the data to write into file
 * @param sz how many bytes to write at most
 * @return the bytes we've written, or Linux specific error code
 */
/* Write @sz bytes from @buf to the file at @path (created 0666 if absent).
 * @return bytes written, or a negative Linux error code */
static int storeToFile(const char *path, u8 *buf, u32 sz)
{
	int ret = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t oldfs;
#endif
	struct file *fp;

	if (path && buf) {
		ret = openFile(&fp, path, O_CREAT | O_WRONLY, 0666);
		if (0 == ret) {
			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);

			/* pre-5.10 kernels need KERNEL_DS so VFS accepts a
			 * kernel-space buffer */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			oldfs = get_fs();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
			set_fs(KERNEL_DS);
#else
			set_fs(get_ds());
#endif
#endif

			ret = writeFile(fp, buf, sz);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			set_fs(oldfs);
#endif
			closeFile(fp);

			RTW_INFO("%s writeFile, ret:%d\n", __FUNCTION__, ret);

		} else
			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
	} else {
		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
		ret = -EINVAL;
	}
	return ret;
}
2629 #endif /* PLATFORM_LINUX */
2630
/*
 * Test if the specified @param path is a directory and is readable
 * @param path the path of the directory to test
 * @return _TRUE or _FALSE
 */
rtw_is_dir_readable(const char * path)2636 int rtw_is_dir_readable(const char *path)
2637 {
2638 #ifdef PLATFORM_LINUX
2639 if (isDirReadable(path, NULL) == 0)
2640 return _TRUE;
2641 else
2642 return _FALSE;
2643 #else
2644 /* Todo... */
2645 return _FALSE;
2646 #endif
2647 }
2648
/*
 * Test if the specified @param path is a file and is readable
 * @param path the path of the file to test
 * @return _TRUE or _FALSE
 */
rtw_is_file_readable(const char * path)2654 int rtw_is_file_readable(const char *path)
2655 {
2656 #ifdef PLATFORM_LINUX
2657 if (isFileReadable(path, NULL) == 0)
2658 return _TRUE;
2659 else
2660 return _FALSE;
2661 #else
2662 /* Todo... */
2663 return _FALSE;
2664 #endif
2665 }
2666
/*
 * Test if the specified @param path is a file and is readable.
 * If readable, @param sz is got
 * @param path the path of the file to test
 * @return _TRUE or _FALSE
 */
int rtw_is_file_readable_with_size(const char *path, u32 *sz)
{
#ifdef PLATFORM_LINUX
	return (isFileReadable(path, sz) == 0) ? _TRUE : _FALSE;
#else
	/* Todo... */
	return _FALSE;
#endif
}
2685
/*
 * Test if the specified @param path is a readable file with valid size.
 * If readable, @param sz is got
 * @param path the path of the file to test
 * @return _TRUE or _FALSE
 */
int rtw_readable_file_sz_chk(const char *path, u32 sz)
{
	u32 file_sz;

	/* must be a readable file, and its size must not exceed sz */
	if (rtw_is_file_readable_with_size(path, &file_sz) == _FALSE)
		return _FALSE;

	return (file_sz <= sz) ? _TRUE : _FALSE;
}
2704
/*
 * Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
 * @param path the path of the file to open and read
 * @param buf the starting address of the buffer to store file content
 * @param sz how many bytes to read at most
 * @return the bytes we've read
 */
int rtw_retrieve_from_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int rd = retriveFromFile(path, buf, sz);

	/* negative error codes are reported as "0 bytes read" */
	return (rd < 0) ? 0 : rd;
#else
	/* Todo... */
	return 0;
#endif
}
2722
/*
 * Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
 * @param path the path of the file to open and write
 * @param buf the starting address of the data to write into file
 * @param sz how many bytes to write at most
 * @return the bytes we've written
 */
int rtw_store_to_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int wr = storeToFile(path, buf, sz);

	/* negative error codes are reported as "0 bytes written" */
	return (wr < 0) ? 0 : wr;
#else
	/* Todo... */
	return 0;
#endif
}
2740
2741 #ifdef PLATFORM_LINUX
rtw_alloc_etherdev_with_old_priv(int sizeof_priv,void * old_priv)2742 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
2743 {
2744 struct net_device *pnetdev;
2745 struct rtw_netdev_priv_indicator *pnpi;
2746
2747 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
2748 pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
2749 #else
2750 pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
2751 #endif
2752 if (!pnetdev)
2753 goto RETURN;
2754
2755 pnpi = netdev_priv(pnetdev);
2756 pnpi->priv = old_priv;
2757 pnpi->sizeof_priv = sizeof_priv;
2758
2759 RETURN:
2760 return pnetdev;
2761 }
2762
rtw_alloc_etherdev(int sizeof_priv)2763 struct net_device *rtw_alloc_etherdev(int sizeof_priv)
2764 {
2765 struct net_device *pnetdev;
2766 struct rtw_netdev_priv_indicator *pnpi;
2767
2768 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
2769 pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
2770 #else
2771 pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
2772 #endif
2773 if (!pnetdev)
2774 goto RETURN;
2775
2776 pnpi = netdev_priv(pnetdev);
2777
2778 pnpi->priv = rtw_zvmalloc(sizeof_priv);
2779 if (!pnpi->priv) {
2780 free_netdev(pnetdev);
2781 pnetdev = NULL;
2782 goto RETURN;
2783 }
2784
2785 pnpi->sizeof_priv = sizeof_priv;
2786 RETURN:
2787 return pnetdev;
2788 }
2789
/*
 * Free a net_device allocated by rtw_alloc_etherdev(_with_old_priv).
 * NOTE(review): pnpi->priv itself is never freed here; for the
 * rtw_alloc_etherdev() path the vmalloc'd priv appears to be the
 * caller's responsibility — confirm against callers. When pnpi->priv
 * is NULL, the netdev is intentionally NOT freed either.
 */
void rtw_free_netdev(struct net_device *netdev)
{
	struct rtw_netdev_priv_indicator *pnpi;

	if (!netdev)
		goto RETURN;

	pnpi = netdev_priv(netdev);

	/* refuse to free a netdev without an attached priv (see note above) */
	if (!pnpi->priv)
		goto RETURN;

	free_netdev(netdev);

RETURN:
	return;
}
2807
2808 #endif
2809
2810 #ifdef PLATFORM_FREEBSD
2811 /*
2812 * Copy a buffer from userspace and write into kernel address
2813 * space.
2814 *
2815 * This emulation just calls the FreeBSD copyin function (to
2816 * copy data from user space buffer into a kernel space buffer)
2817 * and is designed to be used with the above io_write_wrapper.
2818 *
2819 * This function should return the number of bytes not copied.
2820 * I.e. success results in a zero value.
2821 * Negative error values are not returned.
2822 */
unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
	/* copyin() failure is reported as "all n bytes un-copied",
	 * matching the Linux copy_from_user() contract */
	return (copyin(from, to, n) == 0) ? 0 : n;
}
2834
unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
	/* copyout() failure is reported as "all n bytes un-copied",
	 * matching the Linux copy_to_user() contract */
	return (copyout(from, to, n) == 0) ? 0 : n;
}
2846
2847
2848 /*
2849 * The usb_register and usb_deregister functions are used to register
2850 * usb drivers with the usb subsystem. In this compatibility layer
2851 * emulation a list of drivers (struct usb_driver) is maintained
2852 * and is used for probing/attaching etc.
2853 *
2854 * usb_register and usb_deregister simply call these functions.
2855 */
int
usb_register(struct usb_driver *driver)
{
	/* hand the driver to the emulation layer's registry */
	rtw_usb_linux_register(driver);

	return 0;
}
2862
2863
int
usb_deregister(struct usb_driver *driver)
{
	/* remove the driver from the emulation layer's registry */
	rtw_usb_linux_deregister(driver);

	return 0;
}
2870
/* Invoke a module init/exit entry point passed as @arg; its int result
 * is intentionally discarded. */
void module_init_exit_wrapper(void *arg)
{
	int (*entry)(void) = arg;

	(void)entry();
}
2877
2878 #endif /* PLATFORM_FREEBSD */
2879
2880 #ifdef CONFIG_PLATFORM_SPRD
2881 #ifdef do_div
2882 #undef do_div
2883 #endif
2884 #include <asm-generic/div64.h>
2885 #endif
2886
/* 64-bit modulo, portable across OS targets.
 * @return the remainder of x / y */
u64 rtw_modular64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	/* do_div() divides x in place and evaluates to the remainder */
	return do_div(x, y);
#elif defined(PLATFORM_WINDOWS)
	return x % y;
#elif defined(PLATFORM_FREEBSD)
	return x % y;
#endif
}
2897
/* 64-bit division, portable across OS targets.
 * @return the quotient of x / y */
u64 rtw_division64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	/* do_div() replaces x with the quotient in place */
	do_div(x, y);
	return x;
#elif defined(PLATFORM_WINDOWS)
	return x / y;
#elif defined(PLATFORM_FREEBSD)
	return x / y;
#endif
}
2909
/*
 * Return a pseudo-random 32-bit value using whichever RNG API the
 * running kernel provides (prandom_u32 >= 3.8, get_random_bytes <=
 * 2.6.18, random32 otherwise).
 */
inline u32 rtw_random32(void)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	return prandom_u32();
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18))
	u32 random_int;
	get_random_bytes(&random_int , 4);
	return random_int;
#else
	return random32();
#endif
#elif defined(PLATFORM_WINDOWS)
#error "to be implemented\n"
#elif defined(PLATFORM_FREEBSD)
#error "to be implemented\n"
#endif
}
2928
/*
 * Free the buffer referenced by *@buf and reset both the pointer and
 * its recorded length.
 * @buf pointer to the buffer pointer to free; *buf may be NULL
 * @buf_len pointer to the recorded length; zeroed before the free
 */
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
	if (!buf || !buf_len)
		return;

	if (*buf) {
		/* snapshot the length, then clear it before freeing so no
		 * stale length is ever paired with a freed pointer
		 * (also drops the old unused duplicate "ori_len" local) */
		u32 ori_len = *buf_len;

		*buf_len = 0;
		rtw_mfree(*buf, ori_len);
		*buf = NULL;
	}
}
2945
/*
 * Replace the buffer at *@buf/*@buf_len with a private copy of
 * @src/@src_len, freeing the previous buffer. A NULL/empty source (or a
 * failed allocation) leaves *buf NULL and *buf_len 0.
 */
void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
{
	u8 *old_buf, *new_buf = NULL;
	u32 old_len, new_len = 0;

	if (!buf || !buf_len)
		return;

	if (src && src_len) {
		/* make a private duplicate of src */
		new_buf = rtw_malloc(src_len);
		if (new_buf) {
			new_len = src_len;
			_rtw_memcpy(new_buf, src, new_len);
		}
	}

	old_buf = *buf;
	old_len = *buf_len;

	/* swap in the duplicate (NULL/0 when no source or alloc failed) */
	*buf_len = 0;
	*buf = new_buf;
	*buf_len = new_len;

	/* release the previous buffer */
	if (old_buf && old_len > 0)
		rtw_mfree(old_buf, old_len);
}
2978
2979
2980 /**
2981 * rtw_cbuf_full - test if cbuf is full
2982 * @cbuf: pointer of struct rtw_cbuf
2983 *
2984 * Returns: _TRUE if cbuf is full
2985 */
rtw_cbuf_full(struct rtw_cbuf * cbuf)2986 inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
2987 {
2988 return (cbuf->write == cbuf->read - 1) ? _TRUE : _FALSE;
2989 }
2990
2991 /**
2992 * rtw_cbuf_empty - test if cbuf is empty
2993 * @cbuf: pointer of struct rtw_cbuf
2994 *
2995 * Returns: _TRUE if cbuf is empty
2996 */
rtw_cbuf_empty(struct rtw_cbuf * cbuf)2997 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
2998 {
2999 return (cbuf->write == cbuf->read) ? _TRUE : _FALSE;
3000 }
3001
3002 /**
3003 * rtw_cbuf_push - push a pointer into cbuf
3004 * @cbuf: pointer of struct rtw_cbuf
3005 * @buf: pointer to push in
3006 *
3007 * Lock free operation, be careful of the use scheme
3008 * Returns: _TRUE push success
3009 */
rtw_cbuf_push(struct rtw_cbuf * cbuf,void * buf)3010 bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
3011 {
3012 if (rtw_cbuf_full(cbuf))
3013 return _FAIL;
3014
3015 if (0)
3016 RTW_INFO("%s on %u\n", __func__, cbuf->write);
3017 cbuf->bufs[cbuf->write] = buf;
3018 cbuf->write = (cbuf->write + 1) % cbuf->size;
3019
3020 return _SUCCESS;
3021 }
3022
3023 /**
3024 * rtw_cbuf_pop - pop a pointer from cbuf
3025 * @cbuf: pointer of struct rtw_cbuf
3026 *
3027 * Lock free operation, be careful of the use scheme
3028 * Returns: pointer popped out
3029 */
rtw_cbuf_pop(struct rtw_cbuf * cbuf)3030 void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
3031 {
3032 void *buf;
3033 if (rtw_cbuf_empty(cbuf))
3034 return NULL;
3035
3036 if (0)
3037 RTW_INFO("%s on %u\n", __func__, cbuf->read);
3038 buf = cbuf->bufs[cbuf->read];
3039 cbuf->read = (cbuf->read + 1) % cbuf->size;
3040
3041 return buf;
3042 }
3043
/**
 * rtw_cbuf_alloc - allocate a rtw_cbuf with given size and do initialization
 * @size: size of pointer
 *
 * Returns: pointer of struct rtw_cbuf, NULL for allocation failure
 */
struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
	/* header plus an array of `size` slot pointers in one allocation */
	struct rtw_cbuf *cbuf =
		(struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);

	if (!cbuf)
		return NULL;

	cbuf->write = 0;
	cbuf->read = 0;
	cbuf->size = size;

	return cbuf;
}
3063
3064 /**
3065 * rtw_cbuf_free - free the given rtw_cbuf
3066 * @cbuf: pointer of struct rtw_cbuf to free
3067 */
rtw_cbuf_free(struct rtw_cbuf * cbuf)3068 void rtw_cbuf_free(struct rtw_cbuf *cbuf)
3069 {
3070 rtw_mfree((u8 *)cbuf, sizeof(*cbuf) + sizeof(void *) * cbuf->size);
3071 }
3072
3073 /**
3074 * map_readN - read a range of map data
3075 * @map: map to read
3076 * @offset: start address to read
3077 * @len: length to read
3078 * @buf: pointer of buffer to store data read
3079 *
3080 * Returns: _SUCCESS or _FAIL
3081 */
int map_readN(const struct map_t *map, u16 offset, u16 len, u8 *buf)
{
	const struct map_seg_t *seg;
	int ret = _FAIL;
	int i;

	if (len == 0) {
		rtw_warn_on(1);
		goto exit;
	}

	if (offset + len > map->len) {
		rtw_warn_on(1);
		goto exit;
	}

	/* start from the default fill value, then overlay each segment
	 * that intersects the requested window */
	_rtw_memset(buf, map->init_value, len);

	for (i = 0; i < map->seg_num; i++) {
		u8 *c_dst, *c_src;
		u16 c_len;

		seg = map->segs + i;
		/* skip segments that do not intersect [offset, offset+len) */
		if (seg->sa + seg->len <= offset || seg->sa >= offset + len)
			continue;

		if (seg->sa >= offset) {
			/* segment starts inside the window */
			c_dst = buf + (seg->sa - offset);
			c_src = seg->c;
			if (seg->sa + seg->len <= offset + len)
				c_len = seg->len;
			else
				c_len = offset + len - seg->sa;
		} else {
			/* segment starts before the window */
			c_dst = buf;
			c_src = seg->c + (offset - seg->sa);
			if (seg->sa + seg->len >= offset + len)
				c_len = len;
			else
				c_len = seg->sa + seg->len - offset;
		}

		_rtw_memcpy(c_dst, c_src, c_len);
	}

	/* fix: report success once the window has been filled — previously
	 * ret stayed _FAIL and the function failed even on success */
	ret = _SUCCESS;

exit:
	return ret;
}
3130
3131 /**
3132 * map_read8 - read 1 byte of map data
3133 * @map: map to read
3134 * @offset: address to read
3135 *
3136 * Returns: value of data of specified offset. map.init_value if offset is out of range
3137 */
map_read8(const struct map_t * map,u16 offset)3138 u8 map_read8(const struct map_t *map, u16 offset)
3139 {
3140 const struct map_seg_t *seg;
3141 u8 val = map->init_value;
3142 int i;
3143
3144 if (offset + 1 > map->len) {
3145 rtw_warn_on(1);
3146 goto exit;
3147 }
3148
3149 for (i = 0; i < map->seg_num; i++) {
3150 seg = map->segs + i;
3151 if (seg->sa + seg->len <= offset || seg->sa >= offset + 1)
3152 continue;
3153
3154 val = *(seg->c + offset - seg->sa);
3155 break;
3156 }
3157
3158 exit:
3159 return val;
3160 }
3161
3162 #ifdef CONFIG_RTW_MESH
/*
 * Add @addr to blacklist @blist with lifetime @timeout_ms, refreshing the
 * expiry time if the address is already present. Expired entries seen
 * while walking are reclaimed as a side effect.
 * @return RTW_ALREADY if present and not yet expired, _SUCCESS on
 *         (re)insertion, _FAIL on allocation failure
 */
int rtw_blacklist_add(_queue *blist, const u8 *addr, u32 timeout_ms)
{
	struct blacklist_ent *ent;
	_list *list, *head;
	u8 exist = _FALSE, timeout = _FALSE;

	enter_critical_bh(&blist->lock);

	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			exist = _TRUE;
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
				timeout = _TRUE;
			/* refresh the expiry of the existing entry */
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			break;
		}

		/* opportunistically drop other expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	if (exist == _FALSE) {
		/* not found: insert a fresh entry */
		ent = rtw_malloc(sizeof(struct blacklist_ent));
		if (ent) {
			_rtw_memcpy(ent->addr, addr, ETH_ALEN);
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			rtw_list_insert_tail(&ent->list, head);
		}
	}

	exit_critical_bh(&blist->lock);

	return (exist == _TRUE && timeout == _FALSE) ? RTW_ALREADY : (ent ? _SUCCESS : _FAIL);
}
3206
/*
 * Remove @addr from blacklist @blist. Expired entries seen while walking
 * are reclaimed as a side effect.
 * @return _SUCCESS if found and removed, RTW_ALREADY otherwise
 */
int rtw_blacklist_del(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *ent = NULL;
	_list *list, *head;
	u8 exist = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
			exist = _TRUE;
			break;
		}

		/* opportunistically drop expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	exit_critical_bh(&blist->lock);

	return exist == _TRUE ? _SUCCESS : RTW_ALREADY;
}
3237
/*
 * Look up @addr in blacklist @blist. A matching entry that has expired
 * is removed and reported as absent; other expired entries encountered
 * are reclaimed as a side effect.
 * @return _TRUE if a live entry for addr exists, _FALSE otherwise
 */
int rtw_blacklist_search(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *ent = NULL;
	_list *list, *head;
	u8 exist = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			/* matching but expired entries are purged, not reported */
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
				rtw_list_delete(&ent->list);
				rtw_mfree(ent, sizeof(struct blacklist_ent));
			} else
				exist = _TRUE;
			break;
		}

		/* opportunistically drop expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	exit_critical_bh(&blist->lock);

	return exist;
}
3270
/* Drop every entry of blacklist @blist. */
void rtw_blacklist_flush(_queue *blist)
{
	_list staging;
	_list *pos, *head;

	_rtw_init_listhead(&staging);

	/* detach the whole list under the lock, free entries outside it */
	enter_critical_bh(&blist->lock);
	rtw_list_splice_init(&blist->queue, &staging);
	exit_critical_bh(&blist->lock);

	head = &staging;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		struct blacklist_ent *ent = LIST_CONTAINOR(pos, struct blacklist_ent, list);

		pos = get_next(pos);
		rtw_list_delete(&ent->list);
		rtw_mfree(ent, sizeof(struct blacklist_ent));
	}
}
3292
/* Print blacklist @blist to @sel, optionally preceded by @title
 * (title only emitted when the list is non-empty). */
void dump_blacklist(void *sel, _queue *blist, const char *title)
{
	struct blacklist_ent *ent;
	_list *pos, *head;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);

	if (rtw_end_of_queue_search(head, pos) == _FALSE) {
		if (title)
			RTW_PRINT_SEL(sel, "%s:\n", title);

		while (rtw_end_of_queue_search(head, pos) == _FALSE) {
			ent = LIST_CONTAINOR(pos, struct blacklist_ent, list);
			pos = get_next(pos);

			/* show remaining lifetime, or mark expired entries */
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
				RTW_PRINT_SEL(sel, MAC_FMT" expired\n", MAC_ARG(ent->addr));
			else
				RTW_PRINT_SEL(sel, MAC_FMT" %u\n", MAC_ARG(ent->addr)
					, rtw_get_remaining_time_ms(ent->exp_time));
		}

	}
	exit_critical_bh(&blist->lock);
}
3320 #endif
3321
3322 /**
3323 * is_null -
3324 *
3325 * Return TRUE if c is null character
3326 * FALSE otherwise.
3327 */
is_null(char c)3328 inline BOOLEAN is_null(char c)
3329 {
3330 if (c == '\0')
3331 return _TRUE;
3332 else
3333 return _FALSE;
3334 }
3335
is_all_null(char * c,int len)3336 inline BOOLEAN is_all_null(char *c, int len)
3337 {
3338 for (; len > 0; len--)
3339 if (c[len - 1] != '\0')
3340 return _FALSE;
3341
3342 return _TRUE;
3343 }
3344
3345 /**
3346 * is_eol -
3347 *
3348 * Return TRUE if c is represent for EOL (end of line)
3349 * FALSE otherwise.
3350 */
is_eol(char c)3351 inline BOOLEAN is_eol(char c)
3352 {
3353 if (c == '\r' || c == '\n')
3354 return _TRUE;
3355 else
3356 return _FALSE;
3357 }
3358
3359 /**
3360 * is_space -
3361 *
3362 * Return TRUE if c is represent for space
3363 * FALSE otherwise.
3364 */
is_space(char c)3365 inline BOOLEAN is_space(char c)
3366 {
3367 if (c == ' ' || c == '\t')
3368 return _TRUE;
3369 else
3370 return _FALSE;
3371 }
3372
3373 /**
3374 * IsHexDigit -
3375 *
3376 * Return TRUE if chTmp is represent for hex digit
3377 * FALSE otherwise.
3378 */
IsHexDigit(char chTmp)3379 inline BOOLEAN IsHexDigit(char chTmp)
3380 {
3381 if ((chTmp >= '0' && chTmp <= '9') ||
3382 (chTmp >= 'a' && chTmp <= 'f') ||
3383 (chTmp >= 'A' && chTmp <= 'F'))
3384 return _TRUE;
3385 else
3386 return _FALSE;
3387 }
3388
3389 /**
3390 * is_alpha -
3391 *
3392 * Return TRUE if chTmp is represent for alphabet
3393 * FALSE otherwise.
3394 */
is_alpha(char chTmp)3395 inline BOOLEAN is_alpha(char chTmp)
3396 {
3397 if ((chTmp >= 'a' && chTmp <= 'z') ||
3398 (chTmp >= 'A' && chTmp <= 'Z'))
3399 return _TRUE;
3400 else
3401 return _FALSE;
3402 }
3403
/* Fold an ASCII lowercase letter to uppercase; other chars pass through. */
inline char alpha_to_upper(char c)
{
	return (c >= 'a' && c <= 'z') ? (char)('A' + (c - 'a')) : c;
}
3410
/* Convert one hex digit to its value (0..15), or -1 if c is not a
 * hex digit. */
int hex2num_i(char c)
{
	int v = -1;

	if (c >= '0' && c <= '9')
		v = c - '0';
	else if (c >= 'a' && c <= 'f')
		v = 10 + (c - 'a');
	else if (c >= 'A' && c <= 'F')
		v = 10 + (c - 'A');

	return v;
}
3421
/* Convert two hex digits at hex[0]/hex[1] to a byte value (0..255),
 * or -1 if either is not a hex digit. The second character is not
 * examined when the first already fails. */
int hex2byte_i(const char *hex)
{
	int hi, lo;

	hi = hex2num_i(hex[0]);
	if (hi < 0)
		return -1;

	lo = hex2num_i(hex[1]);
	if (lo < 0)
		return -1;

	return (hi << 4) | lo;
}
3433
/* Decode 2*len hex characters from @hex into @buf (len bytes).
 * @return 0 on success, -1 on any non-hex character. */
int hexstr2bin(const char *hex, u8 *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		int byte = hex2byte_i(hex + 2 * i);

		if (byte < 0)
			return -1;
		buf[i] = (u8)byte;
	}

	return 0;
}
3450
3451 /**
3452 * hwaddr_aton - Convert ASCII string to MAC address
3453 * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
3454 * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
3455 * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
3456 */
/**
 * hwaddr_aton_i - Convert ASCII string to MAC address
 * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
 * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
 * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
 */
int hwaddr_aton_i(const char *txt, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++) {
		int hi, lo;

		hi = hex2num_i(*txt++);
		if (hi < 0)
			return -1;
		lo = hex2num_i(*txt++);
		if (lo < 0)
			return -1;
		addr[i] = (u8)((hi << 4) | lo);

		/* a ':' separator is required between octets */
		if (i < 5 && *txt++ != ':')
			return -1;
	}

	return 0;
}
3477
3478