1 /******************************************************************************
2 *
3 * Copyright(c) 2007 - 2017 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15
16
17 #define _OSDEP_SERVICE_C_
18
19 #include <drv_types.h>
20
21 #define RT_TAG '1178'
22
23 #ifdef DBG_MEMORY_LEAK
24 #ifdef PLATFORM_LINUX
25 atomic_t _malloc_cnt = ATOMIC_INIT(0);
26 atomic_t _malloc_size = ATOMIC_INIT(0);
27 #endif
28 #endif /* DBG_MEMORY_LEAK */
29
30
31 #ifdef DBG_MEM_ERR_FREE
32
33 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
34
35 #define DBG_MEM_HASHBITS 10
36
37 #define DBG_MEM_TYPE_PHY 0
38 #define DBG_MEM_TYPE_VIR 1
39
/*
 * DBG_MEM_ERR_FREE is for debug purposes only.
 *
 * Limitation: this mechanism can only track one wifi device, and it
 * misbehaves when two or more wifi devices are handled by one driver
 * on the same system, because dbg_mem_ht is a global variable. Moving
 * dbg_mem_ht into struct dvobj_priv to support more wifi devices would
 * require the memory allocation functions, like rtw_malloc(), to take
 * a dvobj parameter to reach the per-device hash table, which would be
 * a huge change for the driver at this point.
 */
54 struct hlist_head dbg_mem_ht[1 << DBG_MEM_HASHBITS];
55
56 struct hash_mem {
57 void *mem;
58 int sz;
59 int type;
60 struct hlist_node node;
61 };
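/*
 * Illustrative sketch only (kept out of the build): the hash above is keyed
 * by the allocation pointer, so a free with a mismatched size or type is
 * flagged by rtw_dbg_mem_free() before the real free happens.
 */
#if 0
	void *buf = _rtw_malloc(128);	/* registers buf via rtw_dbg_mem_alloc(..., DBG_MEM_TYPE_PHY) */

	_rtw_mfree(buf, 64);		/* size mismatch: RTW_ERR + rtw_warn_on(1), buffer is not freed */
#endif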
62
63 #endif /* LINUX_VERSION_CODE */
64
void rtw_dbg_mem_init(void)
66 {
67 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
68 hash_init(dbg_mem_ht);
69 #endif /* LINUX_VERSION_CODE */
70 }
71
void rtw_dbg_mem_deinit(void)
73 {
74 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
75 struct hlist_head *head;
76 struct hlist_node *p;
77 int i;
78
79 for (i = 0; i < HASH_SIZE(dbg_mem_ht); i++) {
80 head = &dbg_mem_ht[i];
81 p = head->first;
82 while (p) {
83 struct hlist_node *prev;
84 struct hash_mem *hm;
85
86 hm = container_of(p, struct hash_mem, node);
87 prev = p;
88 p = p->next;
89
			RTW_ERR("%s: memory leak - %p\n", __func__, hm->mem);
91 hash_del(prev);
92 kfree(hm);
93 }
94 }
95 #endif /* LINUX_VERSION_CODE */
96 }
97
98 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
struct hash_mem *rtw_dbg_mem_find(void *mem)
100 {
101 struct hash_mem *hm;
102 struct hlist_head *head;
103 struct hlist_node *p;
104
105 head = &dbg_mem_ht[hash_64((u64)(mem), DBG_MEM_HASHBITS)];
106
107 p = head->first;
108 while (p) {
109 hm = container_of(p, struct hash_mem, node);
110 if (hm->mem == mem)
111 goto out;
112 p = p->next;
113 }
114 hm = NULL;
115 out:
116 return hm;
117 }
118
void rtw_dbg_mem_alloc(void *mem, int sz, int type)
120 {
121 struct hash_mem *hm;
122
123 hm = rtw_dbg_mem_find(mem);
124 if (!hm) {
		hm = (struct hash_mem *)kmalloc(sizeof(*hm), GFP_ATOMIC);
		if (!hm)
			return;
		hm->mem = mem;
		hm->sz = sz;
		hm->type = type;
		hash_add(dbg_mem_ht, &hm->node, (u64)(mem));
	} else {
		RTW_ERR("%s mem(%p) is in hash already\n", __func__, mem);
132 rtw_warn_on(1);
133 }
134 }
135
bool rtw_dbg_mem_free(void *mem, int sz, int type)
137 {
138 struct hash_mem *hm;
139 bool ret;
140
141 hm = rtw_dbg_mem_find(mem);
142 if (!hm) {
		RTW_ERR("%s cannot find allocated memory: %p\n",
			__func__, mem);
145 rtw_warn_on(1);
146 return false;
147 }
148
149 if (hm->sz != sz) {
		RTW_ERR("%s memory (%p) size mismatch free(%d) != alloc(%d)\n",
			__func__, mem, sz, hm->sz);
152 rtw_warn_on(1);
153 ret = false;
154 goto out;
155 }
156
157 if (hm->type != type) {
		RTW_ERR("%s memory (%p) type mismatch free(%d) != alloc(%d)\n",
			__func__, mem, type, hm->type);
160 rtw_warn_on(1);
161 ret = false;
162 goto out;
163 }
164 ret = true;
165
166 out:
167 hash_del(&hm->node);
168 kfree(hm);
169
170 return ret;
171 }
172
173 #endif /* LINUX_VERSION_CODE */
174 #endif /* DBG_MEM_ERR_FREE */
175
176 #if defined(PLATFORM_LINUX)
/*
 * Translate the OS-dependent @param error_code to an OS-independent RTW_STATUS_CODE
 * @return: one of RTW_STATUS_CODE
 */
inline int RTW_STATUS_CODE(int error_code)
182 {
183 if (error_code >= 0)
184 return _SUCCESS;
185
186 switch (error_code) {
187 /* case -ETIMEDOUT: */
188 /* return RTW_STATUS_TIMEDOUT; */
189 default:
190 return _FAIL;
191 }
192 }
193 #else
inline int RTW_STATUS_CODE(int error_code)
195 {
196 return error_code;
197 }
198 #endif
199
u32 rtw_atoi(u8 *s)
201 {
202
203 int num = 0, flag = 0;
204 int i;
205 for (i = 0; i <= strlen(s); i++) {
206 if (s[i] >= '0' && s[i] <= '9')
207 num = num * 10 + s[i] - '0';
208 else if (s[0] == '-' && i == 0)
209 flag = 1;
210 else
211 break;
212 }
213
214 if (flag == 1)
215 num = num * -1;
216
217 return num;
218
219 }
220
inline void *_rtw_vmalloc(u32 sz)
222 {
223 void *pbuf;
224 #ifdef PLATFORM_LINUX
225 pbuf = vmalloc(sz);
226 #endif
227 #ifdef PLATFORM_FREEBSD
228 pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
229 #endif
230
231 #ifdef PLATFORM_WINDOWS
232 NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
233 #endif
234
235 #ifdef DBG_MEM_ERR_FREE
236 if (pbuf)
237 rtw_dbg_mem_alloc(pbuf, sz, DBG_MEM_TYPE_VIR);
238 #endif /* DBG_MEM_ERR_FREE */
239
240 #ifdef DBG_MEMORY_LEAK
241 #ifdef PLATFORM_LINUX
242 if (pbuf != NULL) {
243 atomic_inc(&_malloc_cnt);
244 atomic_add(sz, &_malloc_size);
245 }
246 #endif
247 #endif /* DBG_MEMORY_LEAK */
248
249 return pbuf;
250 }
251
inline void *_rtw_zvmalloc(u32 sz)
253 {
254 void *pbuf;
255 #ifdef PLATFORM_LINUX
256 pbuf = _rtw_vmalloc(sz);
257 if (pbuf != NULL)
258 memset(pbuf, 0, sz);
259 #endif
260 #ifdef PLATFORM_FREEBSD
261 pbuf = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
262 #endif
263 #ifdef PLATFORM_WINDOWS
264 NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
265 if (pbuf != NULL)
266 NdisFillMemory(pbuf, sz, 0);
267 #endif
268
269 return pbuf;
270 }
271
inline void _rtw_vmfree(void *pbuf, u32 sz)
273 {
274 #ifdef DBG_MEM_ERR_FREE
275 if (!rtw_dbg_mem_free(pbuf, sz, DBG_MEM_TYPE_VIR))
276 return;
277 #endif /* DBG_MEM_ERR_FREE */
278
279 #ifdef PLATFORM_LINUX
280 vfree(pbuf);
281 #endif
282 #ifdef PLATFORM_FREEBSD
283 free(pbuf, M_DEVBUF);
284 #endif
285 #ifdef PLATFORM_WINDOWS
286 NdisFreeMemory(pbuf, sz, 0);
287 #endif
288
289 #ifdef DBG_MEMORY_LEAK
290 #ifdef PLATFORM_LINUX
291 atomic_dec(&_malloc_cnt);
292 atomic_sub(sz, &_malloc_size);
293 #endif
294 #endif /* DBG_MEMORY_LEAK */
295 }
296
void *_rtw_malloc(u32 sz)
298 {
299 void *pbuf = NULL;
300
301 #ifdef PLATFORM_LINUX
302 #ifdef RTK_DMP_PLATFORM
303 if (sz > 0x4000)
304 pbuf = dvr_malloc(sz);
305 else
306 #endif
307 pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
308
309 #endif
310 #ifdef PLATFORM_FREEBSD
311 pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
312 #endif
313 #ifdef PLATFORM_WINDOWS
314
315 NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
316
317 #endif
318
319 #ifdef DBG_MEM_ERR_FREE
320 if (pbuf)
321 rtw_dbg_mem_alloc(pbuf, sz, DBG_MEM_TYPE_PHY);
322 #endif /* DBG_MEM_ERR_FREE */
323
324 #ifdef DBG_MEMORY_LEAK
325 #ifdef PLATFORM_LINUX
326 if (pbuf != NULL) {
327 atomic_inc(&_malloc_cnt);
328 atomic_add(sz, &_malloc_size);
329 }
330 #endif
331 #endif /* DBG_MEMORY_LEAK */
332
333 return pbuf;
334
335 }
336
337
void *_rtw_zmalloc(u32 sz)
339 {
340 #ifdef PLATFORM_FREEBSD
341 return malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
342 #else /* PLATFORM_FREEBSD */
343 void *pbuf = _rtw_malloc(sz);
344
345 if (pbuf != NULL) {
346
347 #ifdef PLATFORM_LINUX
348 memset(pbuf, 0, sz);
349 #endif
350
351 #ifdef PLATFORM_WINDOWS
352 NdisFillMemory(pbuf, sz, 0);
353 #endif
354 }
355
356 return pbuf;
357 #endif /* PLATFORM_FREEBSD */
358 }
359
void _rtw_mfree(void *pbuf, u32 sz)
361 {
362
363 #ifdef DBG_MEM_ERR_FREE
364 if (!rtw_dbg_mem_free(pbuf, sz, DBG_MEM_TYPE_PHY))
365 return;
366 #endif /* DBG_MEM_ERR_FREE */
367
368 #ifdef PLATFORM_LINUX
369 #ifdef RTK_DMP_PLATFORM
370 if (sz > 0x4000)
371 dvr_free(pbuf);
372 else
373 #endif
374 kfree(pbuf);
375
376 #endif
377 #ifdef PLATFORM_FREEBSD
378 free(pbuf, M_DEVBUF);
379 #endif
380 #ifdef PLATFORM_WINDOWS
381
382 NdisFreeMemory(pbuf, sz, 0);
383
384 #endif
385
386 #ifdef DBG_MEMORY_LEAK
387 #ifdef PLATFORM_LINUX
388 atomic_dec(&_malloc_cnt);
389 atomic_sub(sz, &_malloc_size);
390 #endif
391 #endif /* DBG_MEMORY_LEAK */
392
393 }
394
395 #ifdef PLATFORM_FREEBSD
396 /* review again */
struct sk_buff *dev_alloc_skb(unsigned int size)
398 {
399 struct sk_buff *skb = NULL;
400 u8 *data = NULL;
401
402 /* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */
403 skb = _rtw_malloc(sizeof(struct sk_buff));
404 if (!skb)
405 goto out;
406 data = _rtw_malloc(size);
407 if (!data)
408 goto nodata;
409
410 skb->head = (unsigned char *)data;
411 skb->data = (unsigned char *)data;
412 skb->tail = (unsigned char *)data;
413 skb->end = (unsigned char *)data + size;
414 skb->len = 0;
415 /* printf("%s()-%d: skb=%p, skb->head = %p\n", __FUNCTION__, __LINE__, skb, skb->head); */
416
417 out:
418 return skb;
419 nodata:
420 _rtw_mfree(skb, sizeof(struct sk_buff));
421 skb = NULL;
422 goto out;
423
424 }
425
void dev_kfree_skb_any(struct sk_buff *skb)
427 {
428 /* printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head); */
429 if (skb->head)
430 _rtw_mfree(skb->head, 0);
431 /* printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb); */
432 if (skb)
433 _rtw_mfree(skb, 0);
434 }
struct sk_buff *skb_clone(const struct sk_buff *skb)
436 {
437 return NULL;
438 }
439
440 #endif /* PLATFORM_FREEBSD */
441
inline struct sk_buff *_rtw_skb_alloc(u32 sz)
443 {
444 #ifdef PLATFORM_LINUX
445 return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
446 #endif /* PLATFORM_LINUX */
447
448 #ifdef PLATFORM_FREEBSD
449 return dev_alloc_skb(sz);
450 #endif /* PLATFORM_FREEBSD */
451 }
452
inline void _rtw_skb_free(struct sk_buff *skb)
454 {
455 dev_kfree_skb_any(skb);
456 }
457
inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
459 {
460 #ifdef PLATFORM_LINUX
461 return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
462 #endif /* PLATFORM_LINUX */
463
464 #ifdef PLATFORM_FREEBSD
465 return NULL;
466 #endif /* PLATFORM_FREEBSD */
467 }
468
inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
470 {
471 #ifdef PLATFORM_LINUX
472 return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
473 #endif /* PLATFORM_LINUX */
474
475 #ifdef PLATFORM_FREEBSD
476 return skb_clone(skb);
477 #endif /* PLATFORM_FREEBSD */
478 }
inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
480 {
481 #ifdef PLATFORM_LINUX
482 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
483 return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
484 #else
485 return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
486 #endif
487 #endif /* PLATFORM_LINUX */
488
489 #ifdef PLATFORM_FREEBSD
490 return NULL;
491 #endif /* PLATFORM_FREEBSD */
492 }
493
inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
495 {
496 #if defined(PLATFORM_LINUX)
497 skb->dev = ndev;
498 return netif_rx(skb);
499 #elif defined(PLATFORM_FREEBSD)
500 return (*ndev->if_input)(ndev, skb);
501 #else
502 rtw_warn_on(1);
503 return -1;
504 #endif
505 }
506
507 #ifdef CONFIG_RTW_NAPI
inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
509 {
510 #if defined(PLATFORM_LINUX)
511 skb->dev = ndev;
512 return netif_receive_skb(skb);
513 #else
514 rtw_warn_on(1);
515 return -1;
516 #endif
517 }
518
519 #ifdef CONFIG_RTW_GRO
inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
521 {
522 #if defined(PLATFORM_LINUX)
523 return napi_gro_receive(napi, skb);
524 #else
525 rtw_warn_on(1);
526 return -1;
527 #endif
528 }
529 #endif /* CONFIG_RTW_GRO */
530 #endif /* CONFIG_RTW_NAPI */
531
void _rtw_skb_queue_purge(struct sk_buff_head *list)
533 {
534 struct sk_buff *skb;
535
536 while ((skb = skb_dequeue(list)) != NULL)
537 _rtw_skb_free(skb);
538 }
539
540 #ifdef CONFIG_USB_HCI
inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
542 {
543 #ifdef PLATFORM_LINUX
544 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
545 return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
546 #else
547 return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
548 #endif
549 #endif /* PLATFORM_LINUX */
550
551 #ifdef PLATFORM_FREEBSD
552 return malloc(size, M_USBDEV, M_NOWAIT | M_ZERO);
553 #endif /* PLATFORM_FREEBSD */
554 }
inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
556 {
557 #ifdef PLATFORM_LINUX
558 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
559 usb_free_coherent(dev, size, addr, dma);
560 #else
561 usb_buffer_free(dev, size, addr, dma);
562 #endif
563 #endif /* PLATFORM_LINUX */
564
565 #ifdef PLATFORM_FREEBSD
566 free(addr, M_USBDEV);
567 #endif /* PLATFORM_FREEBSD */
568 }
569 #endif /* CONFIG_USB_HCI */
570
571 #if defined(DBG_MEM_ALLOC)
572
struct rtw_mem_stat {
	ATOMIC_T alloc; /* bytes currently allocated */
	ATOMIC_T peak; /* peak bytes ever allocated */
	ATOMIC_T alloc_cnt; /* number of allocations currently outstanding */
	ATOMIC_T alloc_err_cnt; /* number of failed allocation attempts */
};
579
580 struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
581 #ifdef RTW_MEM_FUNC_STAT
582 struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
583 #endif
584
585 char *MSTAT_TYPE_str[] = {
586 "VIR",
587 "PHY",
588 "SKB",
589 "USB",
590 };
591
592 #ifdef RTW_MEM_FUNC_STAT
593 char *MSTAT_FUNC_str[] = {
594 "UNSP",
595 "IO",
596 "TXIO",
597 "RXIO",
598 "TX",
599 "RX",
600 };
601 #endif
602
void rtw_mstat_dump(void *sel)
604 {
605 int i;
606 int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
607 #ifdef RTW_MEM_FUNC_STAT
608 int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
609 #endif
610
611 for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
612 value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
613 value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
614 value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
615 value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
616 }
617
618 #ifdef RTW_MEM_FUNC_STAT
619 for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
620 value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
621 value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
622 value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
623 value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
624 }
625 #endif
626
627 RTW_PRINT_SEL(sel, "===================== MSTAT =====================\n");
	RTW_PRINT_SEL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "alloc_cnt", "err_cnt");
629 RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
630 for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++)
631 RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
632 #ifdef RTW_MEM_FUNC_STAT
633 RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
634 for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++)
635 RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
636 #endif
637 }
638
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
640 {
641 static systime update_time = 0;
642 int peak, alloc;
643 int i;
644
645 /* initialization */
646 if (!update_time) {
647 for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
648 ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
649 ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
650 ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
651 ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
652 }
653 #ifdef RTW_MEM_FUNC_STAT
654 for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
655 ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
656 ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
657 ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
658 ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
659 }
660 #endif
661 }
662
663 switch (status) {
664 case MSTAT_ALLOC_SUCCESS:
665 ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
666 alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
667 peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
668 if (peak < alloc)
669 ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);
670
671 #ifdef RTW_MEM_FUNC_STAT
672 ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
673 alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
674 peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
675 if (peak < alloc)
676 ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
677 #endif
678 break;
679
680 case MSTAT_ALLOC_FAIL:
681 ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
682 #ifdef RTW_MEM_FUNC_STAT
683 ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
684 #endif
685 break;
686
687 case MSTAT_FREE:
688 ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
689 ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
690 #ifdef RTW_MEM_FUNC_STAT
691 ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
692 ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
693 #endif
694 break;
695 };
696
697 /* if (rtw_get_passing_time_ms(update_time) > 5000) { */
698 /* rtw_mstat_dump(RTW_DBGDUMP); */
699 update_time = rtw_get_current_time();
700 /* } */
701 }
702
703 #ifndef SIZE_MAX
704 #define SIZE_MAX (~(size_t)0)
705 #endif
706
707 struct mstat_sniff_rule {
708 enum mstat_f flags;
709 size_t lb;
710 size_t hb;
711 };
712
713 struct mstat_sniff_rule mstat_sniff_rules[] = {
714 {MSTAT_TYPE_PHY, 4097, SIZE_MAX},
715 };
716
717 int mstat_sniff_rule_num = sizeof(mstat_sniff_rules) / sizeof(struct mstat_sniff_rule);
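/*
 * Example only: an additional rule that would log every SKB-tagged allocation
 * between 2049 and 4096 bytes (assuming MSTAT_TYPE_SKB is defined in
 * enum mstat_f alongside MSTAT_TYPE_PHY).
 */
#if 0
struct mstat_sniff_rule mstat_sniff_rules[] = {
	{MSTAT_TYPE_PHY, 4097, SIZE_MAX},
	{MSTAT_TYPE_SKB, 2049, 4096},
};
#endif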
718
bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
720 {
721 int i;
722 for (i = 0; i < mstat_sniff_rule_num; i++) {
723 if (mstat_sniff_rules[i].flags == flags
724 && mstat_sniff_rules[i].lb <= size
725 && mstat_sniff_rules[i].hb >= size)
726 return _TRUE;
727 }
728
729 return _FALSE;
730 }
731
inline void *dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
733 {
734 void *p;
735
736 if (match_mstat_sniff_rules(flags, sz))
737 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
738
739 p = _rtw_vmalloc((sz));
740
741 rtw_mstat_update(
742 flags
743 , p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
744 , sz
745 );
746
747 return p;
748 }
749
inline void *dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
751 {
752 void *p;
753
754 if (match_mstat_sniff_rules(flags, sz))
755 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
756
757 p = _rtw_zvmalloc((sz));
758
759 rtw_mstat_update(
760 flags
761 , p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
762 , sz
763 );
764
765 return p;
766 }
767
inline void dbg_rtw_vmfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
769 {
770
771 if (match_mstat_sniff_rules(flags, sz))
772 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
773
774 _rtw_vmfree((pbuf), (sz));
775
776 rtw_mstat_update(
777 flags
778 , MSTAT_FREE
779 , sz
780 );
781 }
782
inline void *dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
784 {
785 void *p;
786
787 if (match_mstat_sniff_rules(flags, sz))
788 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
789
790 p = _rtw_malloc((sz));
791
792 rtw_mstat_update(
793 flags
794 , p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
795 , sz
796 );
797
798 return p;
799 }
800
inline void *dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
802 {
803 void *p;
804
805 if (match_mstat_sniff_rules(flags, sz))
806 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
807
808 p = _rtw_zmalloc((sz));
809
810 rtw_mstat_update(
811 flags
812 , p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
813 , sz
814 );
815
816 return p;
817 }
818
inline void dbg_rtw_mfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
820 {
821 if (match_mstat_sniff_rules(flags, sz))
822 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));
823
824 _rtw_mfree((pbuf), (sz));
825
826 rtw_mstat_update(
827 flags
828 , MSTAT_FREE
829 , sz
830 );
831 }
832
inline struct sk_buff *dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
834 {
835 struct sk_buff *skb;
836 unsigned int truesize = 0;
837
838 skb = _rtw_skb_alloc(size);
839
840 if (skb)
841 truesize = skb->truesize;
842
843 if (!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
844 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);
845
846 rtw_mstat_update(
847 flags
848 , skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
849 , truesize
850 );
851
852 return skb;
853 }
854
inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
856 {
857 unsigned int truesize = skb->truesize;
858
859 if (match_mstat_sniff_rules(flags, truesize))
860 RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
861
862 _rtw_skb_free(skb);
863
864 rtw_mstat_update(
865 flags
866 , MSTAT_FREE
867 , truesize
868 );
869 }
870
inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
872 {
873 struct sk_buff *skb_cp;
874 unsigned int truesize = skb->truesize;
875 unsigned int cp_truesize = 0;
876
877 skb_cp = _rtw_skb_copy(skb);
878 if (skb_cp)
879 cp_truesize = skb_cp->truesize;
880
881 if (!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
882 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);
883
884 rtw_mstat_update(
885 flags
886 , skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
887 , cp_truesize
888 );
889
890 return skb_cp;
891 }
892
inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
894 {
895 struct sk_buff *skb_cl;
896 unsigned int truesize = skb->truesize;
897 unsigned int cl_truesize = 0;
898
899 skb_cl = _rtw_skb_clone(skb);
900 if (skb_cl)
901 cl_truesize = skb_cl->truesize;
902
903 if (!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
904 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);
905
906 rtw_mstat_update(
907 flags
908 , skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
909 , cl_truesize
910 );
911
912 return skb_cl;
913 }
914
inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
916 {
917 int ret;
918 unsigned int truesize = skb->truesize;
919
920 if (match_mstat_sniff_rules(flags, truesize))
921 RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
922
923 ret = _rtw_netif_rx(ndev, skb);
924
925 rtw_mstat_update(
926 flags
927 , MSTAT_FREE
928 , truesize
929 );
930
931 return ret;
932 }
933
934 #ifdef CONFIG_RTW_NAPI
inline int dbg_rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
936 {
937 int ret;
938 unsigned int truesize = skb->truesize;
939
940 if (match_mstat_sniff_rules(flags, truesize))
941 RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
942
943 ret = _rtw_netif_receive_skb(ndev, skb);
944
945 rtw_mstat_update(
946 flags
947 , MSTAT_FREE
948 , truesize
949 );
950
951 return ret;
952 }
953
954 #ifdef CONFIG_RTW_GRO
inline gro_result_t dbg_rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
956 {
957 int ret;
958 unsigned int truesize = skb->truesize;
959
960 if (match_mstat_sniff_rules(flags, truesize))
961 RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
962
963 ret = _rtw_napi_gro_receive(napi, skb);
964
965 rtw_mstat_update(
966 flags
967 , MSTAT_FREE
968 , truesize
969 );
970
971 return ret;
972 }
973 #endif /* CONFIG_RTW_GRO */
974 #endif /* CONFIG_RTW_NAPI */
975
inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
977 {
978 struct sk_buff *skb;
979
980 while ((skb = skb_dequeue(list)) != NULL)
981 dbg_rtw_skb_free(skb, flags, func, line);
982 }
983
984 #ifdef CONFIG_USB_HCI
inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
986 {
987 void *p;
988
989 if (match_mstat_sniff_rules(flags, size))
990 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);
991
992 p = _rtw_usb_buffer_alloc(dev, size, dma);
993
994 rtw_mstat_update(
995 flags
996 , p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
997 , size
998 );
999
1000 return p;
1001 }
1002
inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
1004 {
1005
1006 if (match_mstat_sniff_rules(flags, size))
1007 RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);
1008
1009 _rtw_usb_buffer_free(dev, size, addr, dma);
1010
1011 rtw_mstat_update(
1012 flags
1013 , MSTAT_FREE
1014 , size
1015 );
1016 }
1017 #endif /* CONFIG_USB_HCI */
1018
1019 #endif /* defined(DBG_MEM_ALLOC) */
1020
void *rtw_malloc2d(int h, int w, size_t size)
1022 {
1023 int j;
1024
1025 void **a = (void **) rtw_zmalloc(h * sizeof(void *) + h * w * size);
1026 if (a == NULL) {
1027 RTW_INFO("%s: alloc memory fail!\n", __FUNCTION__);
1028 return NULL;
1029 }
1030
1031 for (j = 0; j < h; j++)
1032 a[j] = ((char *)(a + h)) + j * w * size;
1033
1034 return a;
1035 }
1036
void rtw_mfree2d(void *pbuf, int h, int w, int size)
1038 {
1039 rtw_mfree((u8 *)pbuf, h * sizeof(void *) + w * h * size);
1040 }
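/*
 * Usage sketch (illustrative): rtw_malloc2d() places h row pointers in front
 * of the h * w * size data area, so the result can be indexed like a normal
 * two-dimensional array.
 */
#if 0
	u8 **map = (u8 **)rtw_malloc2d(4, 16, sizeof(u8));

	if (map) {
		map[2][7] = 0xff;	/* row pointer, then column offset */
		rtw_mfree2d(map, 4, 16, sizeof(u8));
	}
#endif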
1041
inline void rtw_os_pkt_free(_pkt *pkt)
1043 {
1044 #if defined(PLATFORM_LINUX)
1045 rtw_skb_free(pkt);
1046 #elif defined(PLATFORM_FREEBSD)
1047 m_freem(pkt);
1048 #else
1049 #error "TBD\n"
1050 #endif
1051 }
1052
inline _pkt *rtw_os_pkt_copy(_pkt *pkt)
1054 {
1055 #if defined(PLATFORM_LINUX)
1056 return rtw_skb_copy(pkt);
1057 #elif defined(PLATFORM_FREEBSD)
1058 return m_dup(pkt, M_NOWAIT);
1059 #else
1060 #error "TBD\n"
1061 #endif
1062 }
1063
inline void *rtw_os_pkt_data(_pkt *pkt)
1065 {
1066 #if defined(PLATFORM_LINUX)
1067 return pkt->data;
1068 #elif defined(PLATFORM_FREEBSD)
1069 return pkt->m_data;
1070 #else
1071 #error "TBD\n"
1072 #endif
1073 }
1074
inline u32 rtw_os_pkt_len(_pkt *pkt)
1076 {
1077 #if defined(PLATFORM_LINUX)
1078 return pkt->len;
1079 #elif defined(PLATFORM_FREEBSD)
1080 return pkt->m_pkthdr.len;
1081 #else
1082 #error "TBD\n"
1083 #endif
1084 }
1085
void _rtw_memcpy(void *dst, const void *src, u32 sz)
1087 {
1088
1089 #if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
1090
1091 memcpy(dst, src, sz);
1092
1093 #endif
1094
1095 #ifdef PLATFORM_WINDOWS
1096
1097 NdisMoveMemory(dst, src, sz);
1098
1099 #endif
1100
1101 }
1102
inline void _rtw_memmove(void *dst, const void *src, u32 sz)
1104 {
1105 #if defined(PLATFORM_LINUX)
1106 memmove(dst, src, sz);
1107 #else
1108 #error "TBD\n"
1109 #endif
1110 }
1111
int _rtw_memcmp(const void *dst, const void *src, u32 sz)
1113 {
1114
1115 #if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
	/* under Linux/GNU/glibc, memcmp() returns 0 for two identical memory chunks */
1117
1118 if (!(memcmp(dst, src, sz)))
1119 return _TRUE;
1120 else
1121 return _FALSE;
1122 #endif
1123
1124
1125 #ifdef PLATFORM_WINDOWS
	/* under Windows, NdisEqualMemory() returns 1 for two identical memory chunks */
1127
1128 if (NdisEqualMemory(dst, src, sz))
1129 return _TRUE;
1130 else
1131 return _FALSE;
1132
1133 #endif
1134
1135
1136
1137 }
1138
int _rtw_memcmp2(const void *dst, const void *src, u32 sz)
1140 {
1141 const unsigned char *p1 = dst, *p2 = src;
1142
1143 if (sz == 0)
1144 return 0;
1145
1146 while (*p1 == *p2) {
1147 p1++;
1148 p2++;
1149 sz--;
1150 if (sz == 0)
1151 return 0;
1152 }
1153
1154 return *p1 - *p2;
1155 }
1156
void _rtw_memset(void *pbuf, int c, u32 sz)
1158 {
1159
1160 #if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
1161
1162 memset(pbuf, c, sz);
1163
1164 #endif
1165
1166 #ifdef PLATFORM_WINDOWS
1167 #if 0
1168 NdisZeroMemory(pbuf, sz);
1169 if (c != 0)
1170 memset(pbuf, c, sz);
1171 #else
1172 NdisFillMemory(pbuf, sz, c);
1173 #endif
1174 #endif
1175
1176 }
1177
1178 #ifdef PLATFORM_FREEBSD
static inline void __list_add(_list *pnew, _list *pprev, _list *pnext)
1180 {
1181 pnext->prev = pnew;
1182 pnew->next = pnext;
1183 pnew->prev = pprev;
1184 pprev->next = pnew;
1185 }
1186 #endif /* PLATFORM_FREEBSD */
1187
1188
void _rtw_init_listhead(_list *list)
1190 {
1191
1192 #ifdef PLATFORM_LINUX
1193
1194 INIT_LIST_HEAD(list);
1195
1196 #endif
1197
1198 #ifdef PLATFORM_FREEBSD
1199 list->next = list;
1200 list->prev = list;
1201 #endif
1202 #ifdef PLATFORM_WINDOWS
1203
1204 NdisInitializeListHead(list);
1205
1206 #endif
1207
1208 }
1209
1210
/*
For the following list_xxx operations,
the caller must guarantee atomic context.
Otherwise, there will be a race condition.
*/
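/*
 * Illustrative only: a typical caller takes the owning queue's lock around
 * these primitives (pqueue and plist below are hypothetical locals).
 */
#if 0
	_rtw_spinlock(&pqueue->lock);
	if (rtw_is_list_empty(&pqueue->queue) == _FALSE)
		rtw_list_insert_tail(plist, &pqueue->queue);
	_rtw_spinunlock(&pqueue->lock);
#endif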
u32 rtw_is_list_empty(_list *phead)
1217 {
1218
1219 #ifdef PLATFORM_LINUX
1220
1221 if (list_empty(phead))
1222 return _TRUE;
1223 else
1224 return _FALSE;
1225
1226 #endif
1227 #ifdef PLATFORM_FREEBSD
1228
1229 if (phead->next == phead)
1230 return _TRUE;
1231 else
1232 return _FALSE;
1233
1234 #endif
1235
1236
1237 #ifdef PLATFORM_WINDOWS
1238
1239 if (IsListEmpty(phead))
1240 return _TRUE;
1241 else
1242 return _FALSE;
1243
1244 #endif
1245
1246
1247 }
1248
void rtw_list_insert_head(_list *plist, _list *phead)
1250 {
1251
1252 #ifdef PLATFORM_LINUX
1253 list_add(plist, phead);
1254 #endif
1255
1256 #ifdef PLATFORM_FREEBSD
1257 __list_add(plist, phead, phead->next);
1258 #endif
1259
1260 #ifdef PLATFORM_WINDOWS
1261 InsertHeadList(phead, plist);
1262 #endif
1263 }
1264
void rtw_list_insert_tail(_list *plist, _list *phead)
1266 {
1267
1268 #ifdef PLATFORM_LINUX
1269
1270 list_add_tail(plist, phead);
1271
1272 #endif
1273 #ifdef PLATFORM_FREEBSD
1274
1275 __list_add(plist, phead->prev, phead);
1276
1277 #endif
1278 #ifdef PLATFORM_WINDOWS
1279
1280 InsertTailList(phead, plist);
1281
1282 #endif
1283
1284 }
1285
inline void rtw_list_splice(_list *list, _list *head)
1287 {
1288 #ifdef PLATFORM_LINUX
1289 list_splice(list, head);
1290 #else
1291 #error "TBD\n"
1292 #endif
1293 }
1294
inline void rtw_list_splice_init(_list *list, _list *head)
1296 {
1297 #ifdef PLATFORM_LINUX
1298 list_splice_init(list, head);
1299 #else
1300 #error "TBD\n"
1301 #endif
1302 }
1303
inline void rtw_list_splice_tail(_list *list, _list *head)
1305 {
1306 #ifdef PLATFORM_LINUX
1307 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
1308 if (!list_empty(list))
1309 __list_splice(list, head);
1310 #else
1311 list_splice_tail(list, head);
1312 #endif
1313 #else
1314 #error "TBD\n"
1315 #endif
1316 }
1317
inline void rtw_hlist_head_init(rtw_hlist_head *h)
1319 {
1320 #ifdef PLATFORM_LINUX
1321 INIT_HLIST_HEAD(h);
1322 #else
1323 #error "TBD\n"
1324 #endif
1325 }
1326
inline void rtw_hlist_add_head(rtw_hlist_node *n, rtw_hlist_head *h)
1328 {
1329 #ifdef PLATFORM_LINUX
1330 hlist_add_head(n, h);
1331 #else
1332 #error "TBD\n"
1333 #endif
1334 }
1335
inline void rtw_hlist_del(rtw_hlist_node *n)
1337 {
1338 #ifdef PLATFORM_LINUX
1339 hlist_del(n);
1340 #else
1341 #error "TBD\n"
1342 #endif
1343 }
1344
inline void rtw_hlist_add_head_rcu(rtw_hlist_node *n, rtw_hlist_head *h)
1346 {
1347 #ifdef PLATFORM_LINUX
1348 hlist_add_head_rcu(n, h);
1349 #else
1350 #error "TBD\n"
1351 #endif
1352 }
1353
inline void rtw_hlist_del_rcu(rtw_hlist_node *n)
1355 {
1356 #ifdef PLATFORM_LINUX
1357 hlist_del_rcu(n);
1358 #else
1359 #error "TBD\n"
1360 #endif
1361 }
1362
void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc, void *ctx)
1364 {
1365 _adapter *adapter = (_adapter *)padapter;
1366
1367 #ifdef PLATFORM_LINUX
1368 _init_timer(ptimer, adapter->pnetdev, pfunc, ctx);
1369 #endif
1370 #ifdef PLATFORM_FREEBSD
1371 _init_timer(ptimer, adapter->pifp, pfunc, ctx);
1372 #endif
1373 #ifdef PLATFORM_WINDOWS
1374 _init_timer(ptimer, adapter->hndis_adapter, pfunc, ctx);
1375 #endif
1376 }
1377
1378 /*
1379
1380 Caller must check if the list is empty before calling rtw_list_delete
1381
1382 */
1383
1384
void _rtw_init_sema(_sema *sema, int init_val)
1386 {
1387
1388 #ifdef PLATFORM_LINUX
1389
1390 sema_init(sema, init_val);
1391
1392 #endif
1393 #ifdef PLATFORM_FREEBSD
1394 sema_init(sema, init_val, "rtw_drv");
1395 #endif
1396 #ifdef PLATFORM_OS_XP
1397
1398 KeInitializeSemaphore(sema, init_val, SEMA_UPBND); /* count=0; */
1399
1400 #endif
1401
1402 #ifdef PLATFORM_OS_CE
1403 if (*sema == NULL)
1404 *sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL);
1405 #endif
1406
1407 }
1408
void _rtw_free_sema(_sema *sema)
1410 {
1411 #ifdef PLATFORM_FREEBSD
1412 sema_destroy(sema);
1413 #endif
1414 #ifdef PLATFORM_OS_CE
1415 CloseHandle(*sema);
1416 #endif
1417
1418 }
1419
void _rtw_up_sema(_sema *sema)
1421 {
1422
1423 #ifdef PLATFORM_LINUX
1424
1425 up(sema);
1426
1427 #endif
1428 #ifdef PLATFORM_FREEBSD
1429 sema_post(sema);
1430 #endif
1431 #ifdef PLATFORM_OS_XP
1432
1433 KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1, FALSE);
1434
1435 #endif
1436
1437 #ifdef PLATFORM_OS_CE
1438 ReleaseSemaphore(*sema, 1, NULL);
1439 #endif
1440 }
1441
u32 _rtw_down_sema(_sema *sema)
1443 {
1444
1445 #ifdef PLATFORM_LINUX
1446
1447 if (down_killable(sema))
1448 return _FAIL;
1449 else
1450 return _SUCCESS;
1451
1452 #endif
1453 #ifdef PLATFORM_FREEBSD
1454 sema_wait(sema);
1455 return _SUCCESS;
1456 #endif
1457 #ifdef PLATFORM_OS_XP
1458
1459 if (STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL))
1460 return _SUCCESS;
1461 else
1462 return _FAIL;
1463 #endif
1464
1465 #ifdef PLATFORM_OS_CE
1466 if (WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE))
1467 return _SUCCESS;
1468 else
1469 return _FAIL;
1470 #endif
1471 }
1472
inline void thread_exit(_completion *comp)
1474 {
1475 #ifdef PLATFORM_LINUX
1476 complete_and_exit(comp, 0);
1477 #endif
1478
1479 #ifdef PLATFORM_FREEBSD
1480 printf("%s", "RTKTHREAD_exit");
1481 #endif
1482
1483 #ifdef PLATFORM_OS_CE
1484 ExitThread(STATUS_SUCCESS);
1485 #endif
1486
1487 #ifdef PLATFORM_OS_XP
1488 PsTerminateSystemThread(STATUS_SUCCESS);
1489 #endif
1490 }
1491
inline void _rtw_init_completion(_completion *comp)
1493 {
1494 #ifdef PLATFORM_LINUX
1495 init_completion(comp);
1496 #endif
1497 }
inline void _rtw_wait_for_comp_timeout(_completion *comp)
1499 {
1500 #ifdef PLATFORM_LINUX
1501 wait_for_completion_timeout(comp, msecs_to_jiffies(3000));
1502 #endif
1503 }
inline void _rtw_wait_for_comp(_completion *comp)
1505 {
1506 #ifdef PLATFORM_LINUX
1507 wait_for_completion(comp);
1508 #endif
1509 }
1510
void _rtw_mutex_init(_mutex *pmutex)
1512 {
1513 #ifdef PLATFORM_LINUX
1514
1515 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
1516 mutex_init(pmutex);
1517 #else
1518 init_MUTEX(pmutex);
1519 #endif
1520
1521 #endif
1522 #ifdef PLATFORM_FREEBSD
1523 mtx_init(pmutex, "", NULL, MTX_DEF | MTX_RECURSE);
1524 #endif
1525 #ifdef PLATFORM_OS_XP
1526
1527 KeInitializeMutex(pmutex, 0);
1528
1529 #endif
1530
1531 #ifdef PLATFORM_OS_CE
1532 *pmutex = CreateMutex(NULL, _FALSE, NULL);
1533 #endif
1534 }
1535
1536 void _rtw_mutex_free(_mutex *pmutex);
void _rtw_mutex_free(_mutex *pmutex)
1538 {
#ifdef PLATFORM_LINUX

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#else
#endif

#endif

#ifdef PLATFORM_FREEBSD
	sema_destroy(pmutex);
#endif
1551
1552 #ifdef PLATFORM_OS_XP
1553
1554 #endif
1555
1556 #ifdef PLATFORM_OS_CE
1557
1558 #endif
1559 }
1560
void _rtw_spinlock_init(_lock *plock)
1562 {
1563
1564 #ifdef PLATFORM_LINUX
1565
1566 spin_lock_init(plock);
1567
1568 #endif
1569 #ifdef PLATFORM_FREEBSD
1570 mtx_init(plock, "", NULL, MTX_DEF | MTX_RECURSE);
1571 #endif
1572 #ifdef PLATFORM_WINDOWS
1573
1574 NdisAllocateSpinLock(plock);
1575
1576 #endif
1577
1578 }
1579
void _rtw_spinlock_free(_lock *plock)
1581 {
1582 #ifdef PLATFORM_FREEBSD
1583 mtx_destroy(plock);
1584 #endif
1585
1586 #ifdef PLATFORM_WINDOWS
1587
1588 NdisFreeSpinLock(plock);
1589
1590 #endif
1591
1592 }
1593 #ifdef PLATFORM_FREEBSD
1594 extern PADAPTER prtw_lock;
1595
void rtw_mtx_lock(_lock *plock)
1597 {
1598 if (prtw_lock)
1599 mtx_lock(&prtw_lock->glock);
1600 else
1601 printf("%s prtw_lock==NULL", __FUNCTION__);
1602 }
void rtw_mtx_unlock(_lock *plock)
1604 {
1605 if (prtw_lock)
1606 mtx_unlock(&prtw_lock->glock);
1607 else
1608 printf("%s prtw_lock==NULL", __FUNCTION__);
1609
1610 }
1611 #endif /* PLATFORM_FREEBSD */
1612
1613
void _rtw_spinlock(_lock *plock)
1615 {
1616
1617 #ifdef PLATFORM_LINUX
1618
1619 spin_lock(plock);
1620
1621 #endif
1622 #ifdef PLATFORM_FREEBSD
1623 mtx_lock(plock);
1624 #endif
1625 #ifdef PLATFORM_WINDOWS
1626
1627 NdisAcquireSpinLock(plock);
1628
1629 #endif
1630
1631 }
1632
void _rtw_spinunlock(_lock *plock)
1634 {
1635
1636 #ifdef PLATFORM_LINUX
1637
1638 spin_unlock(plock);
1639
1640 #endif
1641 #ifdef PLATFORM_FREEBSD
1642 mtx_unlock(plock);
1643 #endif
1644 #ifdef PLATFORM_WINDOWS
1645
1646 NdisReleaseSpinLock(plock);
1647
1648 #endif
1649 }
1650
1651
void _rtw_spinlock_ex(_lock *plock)
1653 {
1654
1655 #ifdef PLATFORM_LINUX
1656
1657 spin_lock(plock);
1658
1659 #endif
1660 #ifdef PLATFORM_FREEBSD
1661 mtx_lock(plock);
1662 #endif
1663 #ifdef PLATFORM_WINDOWS
1664
1665 NdisDprAcquireSpinLock(plock);
1666
1667 #endif
1668
1669 }
1670
void _rtw_spinunlock_ex(_lock *plock)
1672 {
1673
1674 #ifdef PLATFORM_LINUX
1675
1676 spin_unlock(plock);
1677
1678 #endif
1679 #ifdef PLATFORM_FREEBSD
1680 mtx_unlock(plock);
1681 #endif
1682 #ifdef PLATFORM_WINDOWS
1683
1684 NdisDprReleaseSpinLock(plock);
1685
1686 #endif
1687 }
1688
1689
1690
void _rtw_init_queue(_queue *pqueue)
1692 {
1693 _rtw_init_listhead(&(pqueue->queue));
1694 _rtw_spinlock_init(&(pqueue->lock));
1695 }
1696
void _rtw_deinit_queue(_queue *pqueue)
1698 {
1699 _rtw_spinlock_free(&(pqueue->lock));
1700 }
1701
u32 _rtw_queue_empty(_queue *pqueue)
1703 {
1704 return rtw_is_list_empty(&(pqueue->queue));
1705 }
1706
1707
u32 rtw_end_of_queue_search(_list *head, _list *plist)
1709 {
1710 if (head == plist)
1711 return _TRUE;
1712 else
1713 return _FALSE;
1714 }
1715
1716
systime _rtw_get_current_time(void)
1718 {
1719
1720 #ifdef PLATFORM_LINUX
1721 return jiffies;
1722 #endif
1723 #ifdef PLATFORM_FREEBSD
1724 struct timeval tvp;
1725 getmicrotime(&tvp);
1726 return tvp.tv_sec;
1727 #endif
1728 #ifdef PLATFORM_WINDOWS
1729 LARGE_INTEGER SystemTime;
1730 NdisGetCurrentSystemTime(&SystemTime);
1731 return SystemTime.LowPart;/* count of 100-nanosecond intervals */
1732 #endif
1733 }
1734
inline u32 _rtw_systime_to_ms(systime stime)
1736 {
1737 #ifdef PLATFORM_LINUX
1738 return jiffies_to_msecs(stime);
1739 #endif
1740 #ifdef PLATFORM_FREEBSD
1741 return stime * 1000;
1742 #endif
1743 #ifdef PLATFORM_WINDOWS
1744 return stime / 10000 ;
1745 #endif
1746 }
1747
inline systime _rtw_ms_to_systime(u32 ms)
1749 {
1750 #ifdef PLATFORM_LINUX
1751 return msecs_to_jiffies(ms);
1752 #endif
1753 #ifdef PLATFORM_FREEBSD
1754 return ms / 1000;
1755 #endif
1756 #ifdef PLATFORM_WINDOWS
1757 return ms * 10000 ;
1758 #endif
1759 }
1760
inline systime _rtw_us_to_systime(u32 us)
1762 {
1763 #ifdef PLATFORM_LINUX
1764 return usecs_to_jiffies(us);
1765 #else
1766 #error "TBD\n"
1767 #endif
1768 }
1769
/* the input parameter start uses the same unit as returned by rtw_get_current_time */
inline s32 _rtw_get_passing_time_ms(systime start)
1772 {
1773 return _rtw_systime_to_ms(_rtw_get_current_time() - start);
1774 }
1775
inline s32 _rtw_get_remaining_time_ms(systime end)
1777 {
1778 return _rtw_systime_to_ms(end - _rtw_get_current_time());
1779 }
1780
inline s32 _rtw_get_time_interval_ms(systime start, systime end)
1782 {
1783 return _rtw_systime_to_ms(end - start);
1784 }
1785
inline bool _rtw_time_after(systime a, systime b)
1787 {
1788 #ifdef PLATFORM_LINUX
1789 return time_after(a, b);
1790 #else
1791 #error "TBD\n"
1792 #endif
1793 }
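/*
 * Typical polling pattern built on the helpers above (sketch only; the
 * condition_met() call is a placeholder for the real hardware check).
 */
#if 0
	systime start = rtw_get_current_time();

	while (!condition_met()) {
		if (rtw_get_passing_time_ms(start) > 100)
			break;	/* give up after roughly 100 ms */
		rtw_msleep_os(1);
	}
#endif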
1794
sysptime rtw_sptime_get(void)
1796 {
1797 /* CLOCK_MONOTONIC */
1798 #ifdef PLATFORM_LINUX
1799 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
1800 struct timespec64 cur;
1801
1802 ktime_get_ts64(&cur);
1803 return timespec64_to_ktime(cur);
1804 #else
1805 struct timespec cur;
1806
1807 ktime_get_ts(&cur);
1808 return timespec_to_ktime(cur);
1809 #endif
1810 #else
1811 #error "TBD\n"
1812 #endif
1813 }
1814
sysptime rtw_sptime_set(s64 secs, const u32 nsecs)
1816 {
1817 #ifdef PLATFORM_LINUX
1818 return ktime_set(secs, nsecs);
1819 #else
1820 #error "TBD\n"
1821 #endif
1822 }
1823
sysptime rtw_sptime_zero(void)
1825 {
1826 #ifdef PLATFORM_LINUX
1827 return ktime_set(0, 0);
1828 #else
1829 #error "TBD\n"
1830 #endif
1831 }
1832
1833 /*
1834 * cmp1 < cmp2: return <0
1835 * cmp1 == cmp2: return 0
1836 * cmp1 > cmp2: return >0
1837 */
int rtw_sptime_cmp(const sysptime cmp1, const sysptime cmp2)
1839 {
1840 #ifdef PLATFORM_LINUX
1841 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
1842 return ktime_compare(cmp1, cmp2);
1843 #else
1844 if (cmp1.tv64 < cmp2.tv64)
1845 return -1;
1846 if (cmp1.tv64 > cmp2.tv64)
1847 return 1;
1848 return 0;
1849 #endif
1850 #else
1851 #error "TBD\n"
1852 #endif
1853 }
1854
bool rtw_sptime_eql(const sysptime cmp1, const sysptime cmp2)
1856 {
1857 #ifdef PLATFORM_LINUX
1858 return rtw_sptime_cmp(cmp1, cmp2) == 0;
1859 #else
1860 #error "TBD\n"
1861 #endif
1862 }
1863
bool rtw_sptime_is_zero(const sysptime sptime)
1865 {
1866 #ifdef PLATFORM_LINUX
1867 return rtw_sptime_cmp(sptime, rtw_sptime_zero()) == 0;
1868 #else
1869 #error "TBD\n"
1870 #endif
1871 }
1872
1873 /*
1874 * sub = lhs - rhs, in normalized form
1875 */
sysptime rtw_sptime_sub(const sysptime lhs, const sysptime rhs)
1877 {
1878 #ifdef PLATFORM_LINUX
1879 return ktime_sub(lhs, rhs);
1880 #else
1881 #error "TBD\n"
1882 #endif
1883 }
1884
1885 /*
1886 * add = lhs + rhs, in normalized form
1887 */
sysptime rtw_sptime_add(const sysptime lhs, const sysptime rhs)
1889 {
1890 #ifdef PLATFORM_LINUX
1891 return ktime_add(lhs, rhs);
1892 #else
1893 #error "TBD\n"
1894 #endif
1895 }
1896
s64 rtw_sptime_to_ms(const sysptime sptime)
1898 {
1899 #ifdef PLATFORM_LINUX
1900 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
1901 return ktime_to_ms(sptime);
1902 #else
1903 struct timeval tv = ktime_to_timeval(sptime);
1904
1905 return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
1906 #endif
1907 #else
1908 #error "TBD\n"
1909 #endif
1910 }
1911
sysptime rtw_ms_to_sptime(u64 ms)
1913 {
1914 #ifdef PLATFORM_LINUX
1915 return ns_to_ktime(ms * NSEC_PER_MSEC);
1916 #else
1917 #error "TBD\n"
1918 #endif
1919 }
1920
s64 rtw_sptime_to_us(const sysptime sptime)
1922 {
1923 #ifdef PLATFORM_LINUX
1924 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
1925 return ktime_to_us(sptime);
1926 #else
1927 struct timeval tv = ktime_to_timeval(sptime);
1928
1929 return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
1930 #endif
1931 #else
1932 #error "TBD\n"
1933 #endif
1934 }
1935
sysptime rtw_us_to_sptime(u64 us)
1937 {
1938 #ifdef PLATFORM_LINUX
1939 return ns_to_ktime(us * NSEC_PER_USEC);
1940 #else
1941 #error "TBD\n"
1942 #endif
1943 }
1944
s64 rtw_sptime_to_ns(const sysptime sptime)
1946 {
1947 #ifdef PLATFORM_LINUX
1948 return ktime_to_ns(sptime);
1949 #else
1950 #error "TBD\n"
1951 #endif
1952 }
1953
sysptime rtw_ns_to_sptime(u64 ns)
1955 {
1956 #ifdef PLATFORM_LINUX
1957 return ns_to_ktime(ns);
1958 #else
1959 #error "TBD\n"
1960 #endif
1961 }
1962
s64 rtw_sptime_diff_ms(const sysptime start, const sysptime end)
1964 {
1965 sysptime diff;
1966
1967 diff = rtw_sptime_sub(end, start);
1968
1969 return rtw_sptime_to_ms(diff);
1970 }
1971
s64 rtw_sptime_pass_ms(const sysptime start)
1973 {
1974 sysptime cur, diff;
1975
1976 cur = rtw_sptime_get();
1977 diff = rtw_sptime_sub(cur, start);
1978
1979 return rtw_sptime_to_ms(diff);
1980 }
1981
s64 rtw_sptime_diff_us(const sysptime start, const sysptime end)
1983 {
1984 sysptime diff;
1985
1986 diff = rtw_sptime_sub(end, start);
1987
1988 return rtw_sptime_to_us(diff);
1989 }
1990
s64 rtw_sptime_pass_us(const sysptime start)
1992 {
1993 sysptime cur, diff;
1994
1995 cur = rtw_sptime_get();
1996 diff = rtw_sptime_sub(cur, start);
1997
1998 return rtw_sptime_to_us(diff);
1999 }
2000
s64 rtw_sptime_diff_ns(const sysptime start, const sysptime end)
2002 {
2003 sysptime diff;
2004
2005 diff = rtw_sptime_sub(end, start);
2006
2007 return rtw_sptime_to_ns(diff);
2008 }
2009
s64 rtw_sptime_pass_ns(const sysptime start)
2011 {
2012 sysptime cur, diff;
2013
2014 cur = rtw_sptime_get();
2015 diff = rtw_sptime_sub(cur, start);
2016
2017 return rtw_sptime_to_ns(diff);
2018 }
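/*
 * Sketch of measuring an elapsed interval with the sysptime helpers above
 * (do_work() is a placeholder).
 */
#if 0
	sysptime t0 = rtw_sptime_get();

	do_work();
	RTW_INFO("took %lld ms\n", rtw_sptime_pass_ms(t0));
#endif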
2019
void rtw_sleep_schedulable(int ms)
2021 {
2022
2023 #ifdef PLATFORM_LINUX
2024
2025 u32 delta;
2026
2027 delta = (ms * HZ) / 1000; /* (ms) */
2028 if (delta == 0) {
2029 delta = 1;/* 1 ms */
2030 }
2031 set_current_state(TASK_INTERRUPTIBLE);
2032 schedule_timeout(delta);
2033 return;
2034
2035 #endif
2036 #ifdef PLATFORM_FREEBSD
2037 DELAY(ms * 1000);
2038 return ;
2039 #endif
2040
2041 #ifdef PLATFORM_WINDOWS
2042
2043 NdisMSleep(ms * 1000); /* (us)*1000=(ms) */
2044
2045 #endif
2046
2047 }
2048
2049
void rtw_msleep_os(int ms)
2051 {
2052
2053 #ifdef PLATFORM_LINUX
2054 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
2055 if (ms < 20) {
2056 unsigned long us = ms * 1000UL;
2057 usleep_range(us, us + 1000UL);
2058 } else
2059 #endif
2060 msleep((unsigned int)ms);
2061
2062 #endif
2063 #ifdef PLATFORM_FREEBSD
2064 /* Delay for delay microseconds */
2065 DELAY(ms * 1000);
2066 return ;
2067 #endif
2068 #ifdef PLATFORM_WINDOWS
2069
2070 NdisMSleep(ms * 1000); /* (us)*1000=(ms) */
2071
2072 #endif
2073
2074
2075 }
void rtw_usleep_os(int us)
2077 {
2078 #ifdef PLATFORM_LINUX
2079
2080 /* msleep((unsigned int)us); */
2081 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
2082 usleep_range(us, us + 1);
2083 #else
2084 if (1 < (us / 1000))
2085 msleep(1);
2086 else
2087 msleep((us / 1000) + 1);
2088 #endif
2089 #endif
2090
2091 #ifdef PLATFORM_FREEBSD
2092 /* Delay for delay microseconds */
2093 DELAY(us);
2094
2095 return ;
2096 #endif
2097 #ifdef PLATFORM_WINDOWS
2098
2099 NdisMSleep(us); /* (us) */
2100
2101 #endif
2102
2103
2104 }
2105
2106
2107 #ifdef DBG_DELAY_OS
void _rtw_mdelay_os(int ms, const char *func, const int line)
2109 {
2110 #if 0
2111 if (ms > 10)
2112 RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
2113 rtw_msleep_os(ms);
2114 return;
2115 #endif
2116
2117
2118 RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
2119
2120 #if defined(PLATFORM_LINUX)
2121
2122 mdelay((unsigned long)ms);
2123
2124 #elif defined(PLATFORM_WINDOWS)
2125
2126 NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */
2127
2128 #endif
2129
2130
2131 }
void _rtw_udelay_os(int us, const char *func, const int line)
2133 {
2134
2135 #if 0
2136 if (us > 1000) {
2137 RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
2138 rtw_usleep_os(us);
2139 return;
2140 }
2141 #endif
2142
2143
2144 RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
2145
2146
2147 #if defined(PLATFORM_LINUX)
2148
2149 udelay((unsigned long)us);
2150
2151 #elif defined(PLATFORM_WINDOWS)
2152
2153 NdisStallExecution(us); /* (us) */
2154
2155 #endif
2156
2157 }
2158 #else
void rtw_mdelay_os(int ms)
2160 {
2161
2162 #ifdef PLATFORM_LINUX
2163
2164 mdelay((unsigned long)ms);
2165
2166 #endif
2167 #ifdef PLATFORM_FREEBSD
2168 DELAY(ms * 1000);
2169 return ;
2170 #endif
2171 #ifdef PLATFORM_WINDOWS
2172
2173 NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */
2174
2175 #endif
2176
2177
2178 }
void rtw_udelay_os(int us)
2180 {
2181
2182 #ifdef PLATFORM_LINUX
2183
2184 udelay((unsigned long)us);
2185
2186 #endif
2187 #ifdef PLATFORM_FREEBSD
2188 /* Delay for delay microseconds */
2189 DELAY(us);
2190 return ;
2191 #endif
2192 #ifdef PLATFORM_WINDOWS
2193
2194 NdisStallExecution(us); /* (us) */
2195
2196 #endif
2197
2198 }
2199 #endif
2200
void rtw_yield_os(void)
2202 {
2203 #ifdef PLATFORM_LINUX
2204 yield();
2205 #endif
2206 #ifdef PLATFORM_FREEBSD
2207 yield();
2208 #endif
2209 #ifdef PLATFORM_WINDOWS
2210 SwitchToThread();
2211 #endif
2212 }
2213
2214 const char *_rtw_pwait_type_str[] = {
2215 [RTW_PWAIT_TYPE_MSLEEP] = "MS",
2216 [RTW_PWAIT_TYPE_USLEEP] = "US",
2217 [RTW_PWAIT_TYPE_YIELD] = "Y",
2218 [RTW_PWAIT_TYPE_MDELAY] = "MD",
2219 [RTW_PWAIT_TYPE_UDELAY] = "UD",
2220 [RTW_PWAIT_TYPE_NUM] = "unknown",
2221 };
2222
static void rtw_pwctx_yield(int us)
2224 {
2225 rtw_yield_os();
2226 }
2227
static void (*const rtw_pwait_hdl[])(int) = {
2229 [RTW_PWAIT_TYPE_MSLEEP] = rtw_msleep_os,
2230 [RTW_PWAIT_TYPE_USLEEP] = rtw_usleep_os,
2231 [RTW_PWAIT_TYPE_YIELD] = rtw_pwctx_yield,
2232 [RTW_PWAIT_TYPE_MDELAY] = rtw_mdelay_os,
2233 [RTW_PWAIT_TYPE_UDELAY] = rtw_udelay_os,
2234 };
2235
int rtw_pwctx_config(struct rtw_pwait_ctx *pwctx, enum rtw_pwait_type type, s32 time, s32 cnt_lmt)
2237 {
2238 int ret = _FAIL;
2239
2240 if (!RTW_PWAIT_TYPE_VALID(type))
2241 goto exit;
2242
2243 pwctx->conf.type = type;
2244 pwctx->conf.wait_time = time;
2245 pwctx->conf.wait_cnt_lmt = cnt_lmt;
2246 pwctx->wait_hdl = rtw_pwait_hdl[type];
2247
2248 ret = _SUCCESS;
2249
2250 exit:
2251 return ret;
2252 }
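
/*
 * Usage sketch (illustrative, not compiled): configure a polling-wait context
 * to sleep 50 us per iteration for at most 100 iterations. "pwctx" and
 * poll_done() are hypothetical; only rtw_pwctx_config() and the handler table
 * above are from this file.
 */
#if 0
	struct rtw_pwait_ctx pwctx;

	if (rtw_pwctx_config(&pwctx, RTW_PWAIT_TYPE_USLEEP, 50, 100) == _SUCCESS) {
		int cnt = 0;

		while (!poll_done() && cnt++ < pwctx.conf.wait_cnt_lmt)
			pwctx.wait_hdl(pwctx.conf.wait_time);
	}
#endif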
2253
rtw_macaddr_is_larger(const u8 * a,const u8 * b)2254 bool rtw_macaddr_is_larger(const u8 *a, const u8 *b)
2255 {
2256 u32 va, vb;
2257
2258 va = be32_to_cpu(*((u32 *)a));
2259 vb = be32_to_cpu(*((u32 *)b));
2260 if (va > vb)
2261 return 1;
2262 else if (va < vb)
2263 return 0;
2264
2265 return be16_to_cpu(*((u16 *)(a + 4))) > be16_to_cpu(*((u16 *)(b + 4)));
2266 }
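
/*
 * Example (illustrative): for a = 00:e0:4c:00:00:02 and b = 00:e0:4c:00:00:01,
 * rtw_macaddr_is_larger(a, b) returns 1: the first four bytes tie as a
 * big-endian u32, so the trailing two bytes decide as a big-endian u16.
 */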
2267
2268 #define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
2269 #define RTW_SUSPEND_TRAFFIC_LOCK_NAME "rtw_wifi_traffic"
2270 #define RTW_SUSPEND_RESUME_LOCK_NAME "rtw_wifi_resume"
2271 #ifdef CONFIG_WAKELOCK
2272 static struct wake_lock rtw_suspend_lock;
2273 static struct wake_lock rtw_suspend_traffic_lock;
2274 static struct wake_lock rtw_suspend_resume_lock;
2275 #elif defined(CONFIG_ANDROID_POWER)
2276 static android_suspend_lock_t rtw_suspend_lock = {
2277 .name = RTW_SUSPEND_LOCK_NAME
2278 };
2279 static android_suspend_lock_t rtw_suspend_traffic_lock = {
2280 .name = RTW_SUSPEND_TRAFFIC_LOCK_NAME
2281 };
2282 static android_suspend_lock_t rtw_suspend_resume_lock = {
2283 .name = RTW_SUSPEND_RESUME_LOCK_NAME
2284 };
2285 #endif
2286
rtw_suspend_lock_init(void)2287 inline void rtw_suspend_lock_init(void)
2288 {
2289 #ifdef CONFIG_WAKELOCK
2290 wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME);
2291 wake_lock_init(&rtw_suspend_traffic_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_TRAFFIC_LOCK_NAME);
2292 wake_lock_init(&rtw_suspend_resume_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RESUME_LOCK_NAME);
2293 #elif defined(CONFIG_ANDROID_POWER)
2294 android_init_suspend_lock(&rtw_suspend_lock);
2295 android_init_suspend_lock(&rtw_suspend_traffic_lock);
2296 android_init_suspend_lock(&rtw_suspend_resume_lock);
2297 #endif
2298 }
2299
rtw_suspend_lock_uninit(void)2300 inline void rtw_suspend_lock_uninit(void)
2301 {
2302 #ifdef CONFIG_WAKELOCK
2303 wake_lock_destroy(&rtw_suspend_lock);
2304 wake_lock_destroy(&rtw_suspend_traffic_lock);
2305 wake_lock_destroy(&rtw_suspend_resume_lock);
2306 #elif defined(CONFIG_ANDROID_POWER)
2307 android_uninit_suspend_lock(&rtw_suspend_lock);
2308 android_uninit_suspend_lock(&rtw_suspend_traffic_lock);
2309 android_uninit_suspend_lock(&rtw_suspend_resume_lock);
2310 #endif
2311 }
2312
rtw_lock_suspend(void)2313 inline void rtw_lock_suspend(void)
2314 {
2315 #ifdef CONFIG_WAKELOCK
2316 wake_lock(&rtw_suspend_lock);
2317 #elif defined(CONFIG_ANDROID_POWER)
2318 android_lock_suspend(&rtw_suspend_lock);
2319 #endif
2320
2321 #if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
2322 /* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
2323 #endif
2324 }
2325
rtw_unlock_suspend(void)2326 inline void rtw_unlock_suspend(void)
2327 {
2328 #ifdef CONFIG_WAKELOCK
2329 wake_unlock(&rtw_suspend_lock);
2330 #elif defined(CONFIG_ANDROID_POWER)
2331 android_unlock_suspend(&rtw_suspend_lock);
2332 #endif
2333
2334 #if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
2335 /* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
2336 #endif
2337 }
2338
rtw_resume_lock_suspend(void)2339 inline void rtw_resume_lock_suspend(void)
2340 {
2341 #ifdef CONFIG_WAKELOCK
2342 wake_lock(&rtw_suspend_resume_lock);
2343 #elif defined(CONFIG_ANDROID_POWER)
2344 android_lock_suspend(&rtw_suspend_resume_lock);
2345 #endif
2346
2347 #if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
2348 /* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
2349 #endif
2350 }
2351
rtw_resume_unlock_suspend(void)2352 inline void rtw_resume_unlock_suspend(void)
2353 {
2354 #ifdef CONFIG_WAKELOCK
2355 wake_unlock(&rtw_suspend_resume_lock);
2356 #elif defined(CONFIG_ANDROID_POWER)
2357 android_unlock_suspend(&rtw_suspend_resume_lock);
2358 #endif
2359
2360 #if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
2361 /* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
2362 #endif
2363 }
2364
rtw_lock_suspend_timeout(u32 timeout_ms)2365 inline void rtw_lock_suspend_timeout(u32 timeout_ms)
2366 {
2367 #ifdef CONFIG_WAKELOCK
2368 wake_lock_timeout(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
2369 #elif defined(CONFIG_ANDROID_POWER)
2370 android_lock_suspend_auto_expire(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
2371 #endif
2372 }
2373
2374
rtw_lock_traffic_suspend_timeout(u32 timeout_ms)2375 inline void rtw_lock_traffic_suspend_timeout(u32 timeout_ms)
2376 {
2377 #ifdef CONFIG_WAKELOCK
2378 wake_lock_timeout(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
2379 #elif defined(CONFIG_ANDROID_POWER)
2380 android_lock_suspend_auto_expire(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
2381 #endif
2382 /* RTW_INFO("traffic lock timeout:%d\n", timeout_ms); */
2383 }
2384
rtw_set_bit(int nr,unsigned long * addr)2385 inline void rtw_set_bit(int nr, unsigned long *addr)
2386 {
2387 #ifdef PLATFORM_LINUX
2388 set_bit(nr, addr);
2389 #else
#error "TBD"
2391 #endif
2392 }
2393
rtw_clear_bit(int nr,unsigned long * addr)2394 inline void rtw_clear_bit(int nr, unsigned long *addr)
2395 {
2396 #ifdef PLATFORM_LINUX
2397 clear_bit(nr, addr);
2398 #else
#error "TBD"
2400 #endif
2401 }
2402
rtw_test_and_clear_bit(int nr,unsigned long * addr)2403 inline int rtw_test_and_clear_bit(int nr, unsigned long *addr)
2404 {
2405 #ifdef PLATFORM_LINUX
2406 return test_and_clear_bit(nr, addr);
2407 #else
#error "TBD"
2409 #endif
2410 }
2411
ATOMIC_SET(ATOMIC_T * v,int i)2412 inline void ATOMIC_SET(ATOMIC_T *v, int i)
2413 {
2414 #ifdef PLATFORM_LINUX
2415 atomic_set(v, i);
2416 #elif defined(PLATFORM_WINDOWS)
2417 *v = i; /* other choice???? */
2418 #elif defined(PLATFORM_FREEBSD)
2419 atomic_set_int(v, i);
2420 #endif
2421 }
2422
ATOMIC_READ(ATOMIC_T * v)2423 inline int ATOMIC_READ(ATOMIC_T *v)
2424 {
2425 #ifdef PLATFORM_LINUX
2426 return atomic_read(v);
2427 #elif defined(PLATFORM_WINDOWS)
2428 return *v; /* other choice???? */
2429 #elif defined(PLATFORM_FREEBSD)
2430 return atomic_load_acq_32(v);
2431 #endif
2432 }
2433
ATOMIC_ADD(ATOMIC_T * v,int i)2434 inline void ATOMIC_ADD(ATOMIC_T *v, int i)
2435 {
2436 #ifdef PLATFORM_LINUX
2437 atomic_add(i, v);
2438 #elif defined(PLATFORM_WINDOWS)
2439 InterlockedAdd(v, i);
2440 #elif defined(PLATFORM_FREEBSD)
2441 atomic_add_int(v, i);
2442 #endif
2443 }
ATOMIC_SUB(ATOMIC_T * v,int i)2444 inline void ATOMIC_SUB(ATOMIC_T *v, int i)
2445 {
2446 #ifdef PLATFORM_LINUX
2447 atomic_sub(i, v);
2448 #elif defined(PLATFORM_WINDOWS)
2449 InterlockedAdd(v, -i);
2450 #elif defined(PLATFORM_FREEBSD)
2451 atomic_subtract_int(v, i);
2452 #endif
2453 }
2454
ATOMIC_INC(ATOMIC_T * v)2455 inline void ATOMIC_INC(ATOMIC_T *v)
2456 {
2457 #ifdef PLATFORM_LINUX
2458 atomic_inc(v);
2459 #elif defined(PLATFORM_WINDOWS)
2460 InterlockedIncrement(v);
2461 #elif defined(PLATFORM_FREEBSD)
2462 atomic_add_int(v, 1);
2463 #endif
2464 }
2465
ATOMIC_DEC(ATOMIC_T * v)2466 inline void ATOMIC_DEC(ATOMIC_T *v)
2467 {
2468 #ifdef PLATFORM_LINUX
2469 atomic_dec(v);
2470 #elif defined(PLATFORM_WINDOWS)
2471 InterlockedDecrement(v);
2472 #elif defined(PLATFORM_FREEBSD)
2473 atomic_subtract_int(v, 1);
2474 #endif
2475 }
2476
ATOMIC_ADD_RETURN(ATOMIC_T * v,int i)2477 inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
2478 {
2479 #ifdef PLATFORM_LINUX
2480 return atomic_add_return(i, v);
2481 #elif defined(PLATFORM_WINDOWS)
2482 return InterlockedAdd(v, i);
2483 #elif defined(PLATFORM_FREEBSD)
2484 atomic_add_int(v, i);
2485 return atomic_load_acq_32(v);
2486 #endif
2487 }
2488
ATOMIC_SUB_RETURN(ATOMIC_T * v,int i)2489 inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
2490 {
2491 #ifdef PLATFORM_LINUX
2492 return atomic_sub_return(i, v);
2493 #elif defined(PLATFORM_WINDOWS)
2494 return InterlockedAdd(v, -i);
2495 #elif defined(PLATFORM_FREEBSD)
2496 atomic_subtract_int(v, i);
2497 return atomic_load_acq_32(v);
2498 #endif
2499 }
2500
ATOMIC_INC_RETURN(ATOMIC_T * v)2501 inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
2502 {
2503 #ifdef PLATFORM_LINUX
2504 return atomic_inc_return(v);
2505 #elif defined(PLATFORM_WINDOWS)
2506 return InterlockedIncrement(v);
2507 #elif defined(PLATFORM_FREEBSD)
2508 atomic_add_int(v, 1);
2509 return atomic_load_acq_32(v);
2510 #endif
2511 }
2512
ATOMIC_DEC_RETURN(ATOMIC_T * v)2513 inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
2514 {
2515 #ifdef PLATFORM_LINUX
2516 return atomic_dec_return(v);
2517 #elif defined(PLATFORM_WINDOWS)
2518 return InterlockedDecrement(v);
2519 #elif defined(PLATFORM_FREEBSD)
2520 atomic_subtract_int(v, 1);
2521 return atomic_load_acq_32(v);
2522 #endif
2523 }
2524
ATOMIC_INC_UNLESS(ATOMIC_T * v,int u)2525 inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
2526 {
2527 #ifdef PLATFORM_LINUX
2528 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
2529 return atomic_add_unless(v, 1, u);
2530 #else
	/* only guarantees the value does not exceed u after this function returns */
2532 if (ATOMIC_INC_RETURN(v) > u) {
2533 ATOMIC_DEC(v);
2534 return 0;
2535 }
2536 return 1;
2537 #endif
2538 #else
2539 #error "TBD\n"
2540 #endif
2541 }
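
/*
 * Usage sketch (illustrative, not compiled): take a bounded reference with
 * ATOMIC_INC_UNLESS, allowing at most 4 concurrent users of some shared
 * resource. "users" is a hypothetical counter, assumed initialized to 0.
 */
#if 0
	static ATOMIC_T users;

	if (ATOMIC_INC_UNLESS(&users, 4)) {
		/* ... use the shared resource ... */
		ATOMIC_DEC(&users);
	} else {
		/* already 4 concurrent users, back off */
	}
#endif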
2542
2543 #ifdef PLATFORM_LINUX
2544 #if !defined(CONFIG_RTW_ANDROID_GKI)
/*
 * Open a file with the given @param path, @param flag and @param mode
 * @param fpp pointer to a struct file pointer; set to the opened file on success, NULL on failure
 * @param path the path of the file to open
 * @param flag file operation flags, please refer to the Linux documentation
 * @param mode file creation mode, please refer to the Linux documentation
 * @return 0 on success, otherwise a Linux-specific error code
 */
openFile(struct file ** fpp,const char * path,int flag,int mode)2553 static int openFile(struct file **fpp, const char *path, int flag, int mode)
2554 {
2555 struct file *fp;
2556
2557 fp = filp_open(path, flag, mode);
2558 if (IS_ERR(fp)) {
2559 *fpp = NULL;
2560 return PTR_ERR(fp);
2561 } else {
2562 *fpp = fp;
2563 return 0;
2564 }
2565 }
2566
2567 /*
2568 * Close the file with the specific @param fp
2569 * @param fp the pointer of struct file to close
2570 * @return always 0
2571 */
closeFile(struct file * fp)2572 static int closeFile(struct file *fp)
2573 {
2574 filp_close(fp, NULL);
2575 return 0;
2576 }
2577
readFile(struct file * fp,char * buf,int len)2578 static int readFile(struct file *fp, char *buf, int len)
2579 {
2580 int rlen = 0, sum = 0;
2581
2582 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2583 if (!(fp->f_mode & FMODE_CAN_READ))
2584 #else
2585 if (!fp->f_op || !fp->f_op->read)
2586 #endif
2587 return -EPERM;
2588
2589 while (sum < len) {
2590 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
2591 rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
2592 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2593 rlen = __vfs_read(fp, buf + sum, len - sum, &fp->f_pos);
2594 #else
2595 rlen = fp->f_op->read(fp, buf + sum, len - sum, &fp->f_pos);
2596 #endif
2597 if (rlen > 0)
2598 sum += rlen;
2599 else if (0 != rlen)
2600 return rlen;
2601 else
2602 break;
2603 }
2604
2605 return sum;
2606
2607 }
2608
writeFile(struct file * fp,char * buf,int len)2609 static int writeFile(struct file *fp, char *buf, int len)
2610 {
2611 int wlen = 0, sum = 0;
2612
2613 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2614 if (!(fp->f_mode & FMODE_CAN_WRITE))
2615 #else
2616 if (!fp->f_op || !fp->f_op->write)
2617 #endif
2618 return -EPERM;
2619
2620 while (sum < len) {
2621 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
2622 wlen = kernel_write(fp, buf + sum, len - sum, &fp->f_pos);
2623 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
2624 wlen = __vfs_write(fp, buf + sum, len - sum, &fp->f_pos);
2625 #else
2626 wlen = fp->f_op->write(fp, buf + sum, len - sum, &fp->f_pos);
2627 #endif
2628 if (wlen > 0)
2629 sum += wlen;
2630 else if (0 != wlen)
2631 return wlen;
2632 else
2633 break;
2634 }
2635
2636 return sum;
2637
2638 }
2639
/*
 * Test if the specified @param pathname is a directory and is readable
 * @param sz is not used
 * @param pathname the path of the directory to test
 * @return 0 on success, otherwise a Linux-specific error code
 */
isDirReadable(const char * pathname,u32 * sz)2646 static int isDirReadable(const char *pathname, u32 *sz)
{
	struct path path;
	int error;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (!error)
		path_put(&path); /* drop the reference taken by kern_path() */

	return error;
}
2653
/*
 * Test if the specified @param path is a file and is readable
 * If readable, @param sz is set to the file size
 * @param path the path of the file to test
 * @return 0 on success, otherwise a Linux-specific error code
 */
isFileReadable(const char * path,u32 * sz)2660 static int isFileReadable(const char *path, u32 *sz)
2661 {
2662 struct file *fp;
2663 int ret = 0;
2664 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2665 mm_segment_t oldfs;
2666 #endif
2667 char buf;
2668
2669 fp = filp_open(path, O_RDONLY, 0);
2670 if (IS_ERR(fp))
2671 ret = PTR_ERR(fp);
2672 else {
2673 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2674 oldfs = get_fs();
2675 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2676 set_fs(KERNEL_DS);
2677 #else
2678 set_fs(get_ds());
2679 #endif
2680 #endif
2681
		if (1 != readFile(fp, &buf, 1))
			ret = -EINVAL; /* fp is valid here, so PTR_ERR(fp) would be meaningless */
2684
2685 if (ret == 0 && sz) {
2686 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
2687 *sz = i_size_read(fp->f_path.dentry->d_inode);
2688 #else
2689 *sz = i_size_read(fp->f_dentry->d_inode);
2690 #endif
2691 }
2692
2693 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2694 set_fs(oldfs);
2695 #endif
2696 filp_close(fp, NULL);
2697 }
2698 return ret;
2699 }
2700
/*
 * Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
 * @param path the path of the file to open and write
 * @param buf the starting address of the data to write into file
 * @param sz how many bytes to write at most
 * @return the number of bytes written, or a Linux-specific error code
 */
storeToFile(const char * path,u8 * buf,u32 sz)2708 static int storeToFile(const char *path, u8 *buf, u32 sz)
2709 {
2710 int ret = 0;
2711 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2712 mm_segment_t oldfs;
2713 #endif
2714 struct file *fp;
2715
2716 if (path && buf) {
2717 ret = openFile(&fp, path, O_CREAT | O_WRONLY, 0666);
2718 if (0 == ret) {
2719 RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);
2720
2721 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2722 oldfs = get_fs();
2723 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2724 set_fs(KERNEL_DS);
2725 #else
2726 set_fs(get_ds());
2727 #endif
2728 #endif
2729
2730 ret = writeFile(fp, buf, sz);
2731
2732 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2733 set_fs(oldfs);
2734 #endif
2735 closeFile(fp);
2736
2737 RTW_INFO("%s writeFile, ret:%d\n", __FUNCTION__, ret);
2738
2739 } else
2740 RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
2741 } else {
2742 RTW_INFO("%s NULL pointer\n", __FUNCTION__);
2743 ret = -EINVAL;
2744 }
2745 return ret;
2746 }
2747 #endif /* !defined(CONFIG_RTW_ANDROID_GKI)*/
2748 #endif /* PLATFORM_LINUX */
2749
2750 #if !defined(CONFIG_RTW_ANDROID_GKI)
/*
 * Test if the specified @param path is a directory and is readable
 * @param path the path of the directory to test
 * @return _TRUE or _FALSE
 */
rtw_is_dir_readable(const char * path)2756 int rtw_is_dir_readable(const char *path)
2757 {
2758 #ifdef PLATFORM_LINUX
2759 if (isDirReadable(path, NULL) == 0)
2760 return _TRUE;
2761 else
2762 return _FALSE;
2763 #else
2764 /* Todo... */
2765 return _FALSE;
2766 #endif
2767 }
2768 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
2769
/*
 * Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
 * @param path the path of the file to open and read
 * @param buf the starting address of the buffer to store file content
 * @param sz how many bytes to read at most
 * @return the number of bytes read, or a Linux-specific error code
 */
retriveFromFile(const char * path,u8 * buf,u32 sz)2777 static int retriveFromFile(const char *path, u8 *buf, u32 sz)
2778 {
2779 #if defined(CONFIG_RTW_ANDROID_GKI)
2780 int ret = -EINVAL;
2781 const struct firmware *fw = NULL;
2782 char* const delim = "/";
	char *name = NULL, *token, *cur, *path_tmp = NULL;
2784
2785
2786 if (path == NULL || buf == NULL) {
2787 RTW_ERR("%s() NULL pointer\n", __func__);
2788 goto err;
2789 }
2790
2791 path_tmp = kstrdup(path, GFP_KERNEL);
2792 if (path_tmp == NULL) {
2793 RTW_ERR("%s() cannot copy path for parsing file name\n", __func__);
2794 goto err;
2795 }
2796
2797 /* parsing file name from path */
2798 cur = path_tmp;
2799 token = strsep(&cur, delim);
2800 while (token != NULL) {
2801 token = strsep(&cur, delim);
		if (token)
2803 name = token;
2804 }
2805
2806 if (name == NULL) {
2807 RTW_ERR("%s() parsing file name fail\n", __func__);
2808 goto err;
2809 }
2810
2811 /* request_firmware() will find file in /vendor/firmware but not in path */
2812 ret = request_firmware(&fw, name, NULL);
2813 if (ret == 0) {
2814 RTW_INFO("%s() Success. retrieve file : %s, file size : %zu\n", __func__, name, fw->size);
2815
2816 if ((u32)fw->size < sz) {
2817 _rtw_memcpy(buf, fw->data, (u32)fw->size);
2818 ret = (u32)fw->size;
2819 goto exit;
2820 } else {
2821 RTW_ERR("%s() file size : %zu exceed buf size : %u\n", __func__, fw->size, sz);
2822 ret = -EFBIG;
2823 goto err;
2824 }
2825 } else {
2826 RTW_ERR("%s() Fail. retrieve file : %s, error : %d\n", __func__, name, ret);
2827 goto err;
2828 }
2829
2830
2831
2832 err:
2833 RTW_ERR("%s() Fail. retrieve file : %s, error : %d\n", __func__, path, ret);
2834 exit:
2835 if (path_tmp)
2836 kfree(path_tmp);
2837 if (fw)
2838 release_firmware(fw);
2839 return ret;
2840 #else /* !defined(CONFIG_RTW_ANDROID_GKI) */
2841 int ret = -1;
2842 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2843 mm_segment_t oldfs;
2844 #endif
2845 struct file *fp;
2846
2847 if (path && buf) {
2848 ret = openFile(&fp, path, O_RDONLY, 0);
2849 if (0 == ret) {
2850 RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);
2851
2852 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2853 oldfs = get_fs();
2854 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2855 set_fs(KERNEL_DS);
2856 #else
2857 set_fs(get_ds());
2858 #endif
2859 #endif
2860
2861 ret = readFile(fp, buf, sz);
2862
2863 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2864 set_fs(oldfs);
2865 #endif
2866 closeFile(fp);
2867
2868 RTW_INFO("%s readFile, ret:%d\n", __FUNCTION__, ret);
2869
2870 } else
2871 RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
2872 } else {
2873 RTW_INFO("%s NULL pointer\n", __FUNCTION__);
2874 ret = -EINVAL;
2875 }
2876 return ret;
2877 #endif /* defined(CONFIG_RTW_ANDROID_GKI) */
2878 }
2879
/*
 * Test if the specified @param path is a file and is readable
 * @param path the path of the file to test
 * @return _TRUE or _FALSE
 */
rtw_is_file_readable(const char * path)2885 int rtw_is_file_readable(const char *path)
2886 {
2887 #ifdef PLATFORM_LINUX
2888 #if !defined(CONFIG_RTW_ANDROID_GKI)
2889 if (isFileReadable(path, NULL) == 0)
2890 return _TRUE;
2891 else
2892 return _FALSE;
2893 #else
	RTW_INFO("%s() Android GKI prohibits kernel_read, return _TRUE\n", __func__);
2895 return _TRUE;
2896 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
2897 #else
2898 /* Todo... */
2899 return _FALSE;
2900 #endif
2901 }
2902
/*
 * Test if the specified @param path is a file and is readable.
 * If readable, @param sz is set to the file size
 * @param path the path of the file to test
 * @return _TRUE or _FALSE
 */
rtw_is_file_readable_with_size(const char * path,u32 * sz)2909 int rtw_is_file_readable_with_size(const char *path, u32 *sz)
2910 {
2911 #ifdef PLATFORM_LINUX
2912 #if !defined(CONFIG_RTW_ANDROID_GKI)
2913 if (isFileReadable(path, sz) == 0)
2914 return _TRUE;
2915 else
2916 return _FALSE;
2917 #else
	RTW_INFO("%s() Android GKI prohibits kernel_read, return _TRUE\n", __func__);
2919 *sz = 0;
2920 return _TRUE;
2921 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
2922 #else
2923 /* Todo... */
2924 return _FALSE;
2925 #endif
2926 }
2927
/*
 * Test if the specified @param path is a readable file whose size does not exceed @param sz.
 * @param path the path of the file to test
 * @param sz the maximum acceptable file size in bytes
 * @return _TRUE or _FALSE
 */
rtw_readable_file_sz_chk(const char * path,u32 sz)2934 int rtw_readable_file_sz_chk(const char *path, u32 sz)
2935 {
2936 u32 fsz;
2937
2938 if (rtw_is_file_readable_with_size(path, &fsz) == _FALSE)
2939 return _FALSE;
2940
2941 if (fsz > sz)
2942 return _FALSE;
2943
2944 return _TRUE;
2945 }
2946
/*
 * Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
 * @param path the path of the file to open and read
 * @param buf the starting address of the buffer to store file content
 * @param sz how many bytes to read at most
 * @return the number of bytes read
 */
rtw_retrieve_from_file(const char * path,u8 * buf,u32 sz)2954 int rtw_retrieve_from_file(const char *path, u8 *buf, u32 sz)
2955 {
2956 #ifdef PLATFORM_LINUX
2957 int ret = retriveFromFile(path, buf, sz);
2958 return ret >= 0 ? ret : 0;
2959 #else
2960 /* Todo... */
2961 return 0;
2962 #endif
2963 }
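
/*
 * Usage sketch (illustrative, not compiled): read at most sizeof(buf) bytes
 * of a configuration file into a local buffer. The path is hypothetical;
 * only rtw_retrieve_from_file() is from this file.
 */
#if 0
	u8 buf[64];
	int len;

	len = rtw_retrieve_from_file("/data/wifi_config.txt", buf, sizeof(buf));
	if (len > 0)
		RTW_INFO("read %d bytes\n", len);
#endif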
2964
2965 #if !defined(CONFIG_RTW_ANDROID_GKI)
/*
 * Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
 * @param path the path of the file to open and write
 * @param buf the starting address of the data to write into file
 * @param sz how many bytes to write at most
 * @return the number of bytes written
 */
rtw_store_to_file(const char * path,u8 * buf,u32 sz)2973 int rtw_store_to_file(const char *path, u8 *buf, u32 sz)
2974 {
2975 #ifdef PLATFORM_LINUX
2976 int ret = storeToFile(path, buf, sz);
2977 return ret >= 0 ? ret : 0;
2978 #else
2979 /* Todo... */
2980 return 0;
2981 #endif
2982 }
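
/*
 * Usage sketch (illustrative, not compiled): dump a capture buffer to a file
 * for debugging. "dump_buf"/"dump_len" and the path are hypothetical; only
 * rtw_store_to_file() is from this file.
 */
#if 0
	int written = rtw_store_to_file("/tmp/rtw_dump.bin", dump_buf, dump_len);

	if (written < (int)dump_len)
		RTW_INFO("short write: %d of %u bytes\n", written, dump_len);
#endif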
2983 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
2984
2985 #ifdef PLATFORM_LINUX
rtw_alloc_etherdev_with_old_priv(int sizeof_priv,void * old_priv)2986 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
2987 {
2988 struct net_device *pnetdev;
2989 struct rtw_netdev_priv_indicator *pnpi;
2990
2991 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
2992 pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
2993 #else
2994 pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
2995 #endif
2996 if (!pnetdev)
2997 goto RETURN;
2998
2999 pnpi = netdev_priv(pnetdev);
3000 pnpi->priv = old_priv;
3001 pnpi->sizeof_priv = sizeof_priv;
3002
3003 RETURN:
3004 return pnetdev;
3005 }
3006
rtw_alloc_etherdev(int sizeof_priv)3007 struct net_device *rtw_alloc_etherdev(int sizeof_priv)
3008 {
3009 struct net_device *pnetdev;
3010 struct rtw_netdev_priv_indicator *pnpi;
3011
3012 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
3013 pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
3014 #else
3015 pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
3016 #endif
3017 if (!pnetdev)
3018 goto RETURN;
3019
3020 pnpi = netdev_priv(pnetdev);
3021
3022 pnpi->priv = rtw_zvmalloc(sizeof_priv);
3023 if (!pnpi->priv) {
3024 free_netdev(pnetdev);
3025 pnetdev = NULL;
3026 goto RETURN;
3027 }
3028
3029 pnpi->sizeof_priv = sizeof_priv;
3030 RETURN:
3031 return pnetdev;
3032 }
3033
rtw_free_netdev(struct net_device * netdev)3034 void rtw_free_netdev(struct net_device *netdev)
3035 {
3036 struct rtw_netdev_priv_indicator *pnpi;
3037
3038 if (!netdev)
3039 goto RETURN;
3040
3041 pnpi = netdev_priv(netdev);
3042
3043 if (!pnpi->priv)
3044 goto RETURN;
3045
3046 free_netdev(netdev);
3047
3048 RETURN:
3049 return;
3050 }
3051
3052 #endif
3053
3054 #ifdef PLATFORM_FREEBSD
3055 /*
3056 * Copy a buffer from userspace and write into kernel address
3057 * space.
3058 *
3059 * This emulation just calls the FreeBSD copyin function (to
3060 * copy data from user space buffer into a kernel space buffer)
3061 * and is designed to be used with the above io_write_wrapper.
3062 *
3063 * This function should return the number of bytes not copied.
3064 * I.e. success results in a zero value.
3065 * Negative error values are not returned.
3066 */
3067 unsigned long
copy_from_user(void * to,const void * from,unsigned long n)3068 copy_from_user(void *to, const void *from, unsigned long n)
3069 {
3070 if (copyin(from, to, n) != 0) {
3071 /* Any errors will be treated as a failure
3072 to copy any of the requested bytes */
3073 return n;
3074 }
3075
3076 return 0;
3077 }
3078
3079 unsigned long
copy_to_user(void * to,const void * from,unsigned long n)3080 copy_to_user(void *to, const void *from, unsigned long n)
3081 {
3082 if (copyout(from, to, n) != 0) {
3083 /* Any errors will be treated as a failure
3084 to copy any of the requested bytes */
3085 return n;
3086 }
3087
3088 return 0;
3089 }
3090
3091
3092 /*
3093 * The usb_register and usb_deregister functions are used to register
3094 * usb drivers with the usb subsystem. In this compatibility layer
3095 * emulation a list of drivers (struct usb_driver) is maintained
3096 * and is used for probing/attaching etc.
3097 *
3098 * usb_register and usb_deregister simply call these functions.
3099 */
3100 int
usb_register(struct usb_driver * driver)3101 usb_register(struct usb_driver *driver)
3102 {
3103 rtw_usb_linux_register(driver);
3104 return 0;
3105 }
3106
3107
3108 int
usb_deregister(struct usb_driver * driver)3109 usb_deregister(struct usb_driver *driver)
3110 {
3111 rtw_usb_linux_deregister(driver);
3112 return 0;
3113 }
3114
module_init_exit_wrapper(void * arg)3115 void module_init_exit_wrapper(void *arg)
3116 {
3117 int (*func)(void) = arg;
3118 func();
3119 return;
3120 }
3121
3122 #endif /* PLATFORM_FREEBSD */
3123
3124 #ifdef CONFIG_PLATFORM_SPRD
3125 #ifdef do_div
3126 #undef do_div
3127 #endif
3128 #include <asm-generic/div64.h>
3129 #endif
3130
rtw_modular64(u64 x,u64 y)3131 u64 rtw_modular64(u64 x, u64 y)
3132 {
3133 #ifdef PLATFORM_LINUX
3134 return do_div(x, y);
3135 #elif defined(PLATFORM_WINDOWS)
3136 return x % y;
3137 #elif defined(PLATFORM_FREEBSD)
3138 return x % y;
3139 #endif
3140 }
3141
rtw_division64(u64 x,u64 y)3142 u64 rtw_division64(u64 x, u64 y)
3143 {
3144 #ifdef PLATFORM_LINUX
3145 do_div(x, y);
3146 return x;
3147 #elif defined(PLATFORM_WINDOWS)
3148 return x / y;
3149 #elif defined(PLATFORM_FREEBSD)
3150 return x / y;
3151 #endif
3152 }
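
/*
 * Example (illustrative): rtw_division64(10000, 3) returns 3333 and
 * rtw_modular64(10000, 3) returns 1. On Linux both wrap do_div(), which
 * divides its first argument in place and returns the remainder, hence the
 * two different return values.
 */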
3153
rtw_random32(void)3154 inline u32 rtw_random32(void)
3155 {
3156 #ifdef PLATFORM_LINUX
3157 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
3158 return prandom_u32();
3159 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18))
3160 u32 random_int;
3161 get_random_bytes(&random_int , 4);
3162 return random_int;
3163 #else
3164 return random32();
3165 #endif
3166 #elif defined(PLATFORM_WINDOWS)
3167 #error "to be implemented\n"
3168 #elif defined(PLATFORM_FREEBSD)
3169 #error "to be implemented\n"
3170 #endif
3171 }
3172
rtw_buf_free(u8 ** buf,u32 * buf_len)3173 void rtw_buf_free(u8 **buf, u32 *buf_len)
3174 {
	if (!buf || !buf_len)
		return;

	if (*buf) {
		u32 ori_len = *buf_len;

		*buf_len = 0;
		rtw_mfree(*buf, ori_len);
		*buf = NULL;
	}
3188 }
3189
rtw_buf_update(u8 ** buf,u32 * buf_len,const u8 * src,u32 src_len)3190 void rtw_buf_update(u8 **buf, u32 *buf_len, const u8 *src, u32 src_len)
3191 {
3192 u32 ori_len = 0, dup_len = 0;
3193 u8 *ori = NULL;
3194 u8 *dup = NULL;
3195
3196 if (!buf || !buf_len)
3197 return;
3198
3199 if (!src || !src_len)
3200 goto keep_ori;
3201
3202 /* duplicate src */
3203 dup = rtw_malloc(src_len);
3204 if (dup) {
3205 dup_len = src_len;
3206 _rtw_memcpy(dup, src, dup_len);
3207 }
3208
3209 keep_ori:
3210 ori = *buf;
3211 ori_len = *buf_len;
3212
3213 /* replace buf with dup */
3214 *buf_len = 0;
3215 *buf = dup;
3216 *buf_len = dup_len;
3217
3218 /* free ori */
3219 if (ori && ori_len > 0)
3220 rtw_mfree(ori, ori_len);
3221 }
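
/*
 * Usage sketch (illustrative, not compiled): keep an owned copy of a
 * variable-length blob, replacing any previous copy, then release it.
 * "src_ies"/"src_ies_len" are hypothetical; only rtw_buf_update() and
 * rtw_buf_free() are from this file.
 */
#if 0
	u8 *ies = NULL;
	u32 ies_len = 0;

	rtw_buf_update(&ies, &ies_len, src_ies, src_ies_len);	/* duplicates src_ies */
	/* ... use ies/ies_len ... */
	rtw_buf_free(&ies, &ies_len);	/* frees and resets to NULL/0 */
#endif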
3222
3223
3224 /**
3225 * rtw_cbuf_full - test if cbuf is full
3226 * @cbuf: pointer of struct rtw_cbuf
3227 *
3228 * Returns: _TRUE if cbuf is full
3229 */
rtw_cbuf_full(struct rtw_cbuf * cbuf)3230 inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
3231 {
	/* handle index wrap-around: the buffer is full when the next write would reach read */
	return ((cbuf->write + 1) % cbuf->size == cbuf->read) ? _TRUE : _FALSE;
3233 }
3234
3235 /**
3236 * rtw_cbuf_empty - test if cbuf is empty
3237 * @cbuf: pointer of struct rtw_cbuf
3238 *
3239 * Returns: _TRUE if cbuf is empty
3240 */
rtw_cbuf_empty(struct rtw_cbuf * cbuf)3241 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
3242 {
3243 return (cbuf->write == cbuf->read) ? _TRUE : _FALSE;
3244 }
3245
3246 /**
3247 * rtw_cbuf_push - push a pointer into cbuf
3248 * @cbuf: pointer of struct rtw_cbuf
3249 * @buf: pointer to push in
3250 *
 * Lock-free operation; be careful with the usage scheme
 * Returns: _TRUE if the push succeeds
3253 */
rtw_cbuf_push(struct rtw_cbuf * cbuf,void * buf)3254 bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
3255 {
3256 if (rtw_cbuf_full(cbuf))
3257 return _FAIL;
3258
3259 if (0)
3260 RTW_INFO("%s on %u\n", __func__, cbuf->write);
3261 cbuf->bufs[cbuf->write] = buf;
3262 cbuf->write = (cbuf->write + 1) % cbuf->size;
3263
3264 return _SUCCESS;
3265 }
3266
3267 /**
3268 * rtw_cbuf_pop - pop a pointer from cbuf
3269 * @cbuf: pointer of struct rtw_cbuf
3270 *
 * Lock-free operation; be careful with the usage scheme
3272 * Returns: pointer popped out
3273 */
rtw_cbuf_pop(struct rtw_cbuf * cbuf)3274 void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
3275 {
3276 void *buf;
3277 if (rtw_cbuf_empty(cbuf))
3278 return NULL;
3279
3280 if (0)
3281 RTW_INFO("%s on %u\n", __func__, cbuf->read);
3282 buf = cbuf->bufs[cbuf->read];
3283 cbuf->read = (cbuf->read + 1) % cbuf->size;
3284
3285 return buf;
3286 }
3287
3288 /**
 * rtw_cbuf_alloc - allocate a rtw_cbuf with the given size and initialize it
 * @size: number of pointer slots
 *
 * Returns: pointer of struct rtw_cbuf, NULL for allocation failure
3293 */
rtw_cbuf_alloc(u32 size)3294 struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
3295 {
3296 struct rtw_cbuf *cbuf;
3297
3298 cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
3299
3300 if (cbuf) {
3301 cbuf->write = cbuf->read = 0;
3302 cbuf->size = size;
3303 }
3304
3305 return cbuf;
3306 }
3307
3308 /**
3309 * rtw_cbuf_free - free the given rtw_cbuf
3310 * @cbuf: pointer of struct rtw_cbuf to free
3311 */
rtw_cbuf_free(struct rtw_cbuf * cbuf)3312 void rtw_cbuf_free(struct rtw_cbuf *cbuf)
3313 {
3314 rtw_mfree((u8 *)cbuf, sizeof(*cbuf) + sizeof(void *) * cbuf->size);
3315 }
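
/*
 * Usage sketch (illustrative, not compiled): a small pointer FIFO built on
 * rtw_cbuf. With N slots, at most N - 1 entries are queued before
 * rtw_cbuf_full() reports full. Everything except the rtw_cbuf_* helpers is
 * hypothetical.
 */
#if 0
	struct rtw_cbuf *q = rtw_cbuf_alloc(8);
	void *item;

	if (q) {
		if (rtw_cbuf_push(q, some_ptr) != _SUCCESS)
			RTW_INFO("queue full\n");

		while ((item = rtw_cbuf_pop(q)) != NULL)
			; /* consume item */

		rtw_cbuf_free(q);
	}
#endif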
3316
3317 /**
3318 * map_readN - read a range of map data
3319 * @map: map to read
3320 * @offset: start address to read
3321 * @len: length to read
3322 * @buf: pointer of buffer to store data read
3323 *
3324 * Returns: _SUCCESS or _FAIL
3325 */
map_readN(const struct map_t * map,u16 offset,u16 len,u8 * buf)3326 int map_readN(const struct map_t *map, u16 offset, u16 len, u8 *buf)
3327 {
3328 const struct map_seg_t *seg;
3329 int ret = _FAIL;
3330 int i;
3331
3332 if (len == 0) {
3333 rtw_warn_on(1);
3334 goto exit;
3335 }
3336
3337 if (offset + len > map->len) {
3338 rtw_warn_on(1);
3339 goto exit;
3340 }
3341
3342 _rtw_memset(buf, map->init_value, len);
3343
3344 for (i = 0; i < map->seg_num; i++) {
3345 u8 *c_dst, *c_src;
3346 u16 c_len;
3347
3348 seg = map->segs + i;
3349 if (seg->sa + seg->len <= offset || seg->sa >= offset + len)
3350 continue;
3351
3352 if (seg->sa >= offset) {
3353 c_dst = buf + (seg->sa - offset);
3354 c_src = seg->c;
3355 if (seg->sa + seg->len <= offset + len)
3356 c_len = seg->len;
3357 else
3358 c_len = offset + len - seg->sa;
3359 } else {
3360 c_dst = buf;
3361 c_src = seg->c + (offset - seg->sa);
3362 if (seg->sa + seg->len >= offset + len)
3363 c_len = len;
3364 else
3365 c_len = seg->sa + seg->len - offset;
3366 }
3367
3368 _rtw_memcpy(c_dst, c_src, c_len);
3369 }

	ret = _SUCCESS;

exit:
3372 return ret;
3373 }
3374
3375 /**
3376 * map_read8 - read 1 byte of map data
3377 * @map: map to read
3378 * @offset: address to read
3379 *
3380 * Returns: value of data of specified offset. map.init_value if offset is out of range
3381 */
map_read8(const struct map_t * map,u16 offset)3382 u8 map_read8(const struct map_t *map, u16 offset)
3383 {
3384 const struct map_seg_t *seg;
3385 u8 val = map->init_value;
3386 int i;
3387
3388 if (offset + 1 > map->len) {
3389 rtw_warn_on(1);
3390 goto exit;
3391 }
3392
3393 for (i = 0; i < map->seg_num; i++) {
3394 seg = map->segs + i;
3395 if (seg->sa + seg->len <= offset || seg->sa >= offset + 1)
3396 continue;
3397
3398 val = *(seg->c + offset - seg->sa);
3399 break;
3400 }
3401
3402 exit:
3403 return val;
3404 }
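
/*
 * Usage sketch (illustrative, not compiled): a 16-byte map whose bytes
 * default to 0xFF except for a 2-byte segment at offset 0x4. The field
 * layout is inferred from the accessors above; the values are made up.
 */
#if 0
	static u8 seg_data[] = {0x12, 0x34};
	static struct map_seg_t segs[] = {
		{ .sa = 0x4, .len = 2, .c = seg_data },
	};
	static struct map_t example_map = {
		.len = 16, .seg_num = 1, .init_value = 0xFF, .segs = segs,
	};

	u8 v = map_read8(&example_map, 0x4);	/* 0x12 */
	u8 d = map_read8(&example_map, 0x0);	/* 0xFF, the init_value */
#endif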
3405
3406 #ifdef CONFIG_RTW_MESH
rtw_blacklist_add(_queue * blist,const u8 * addr,u32 timeout_ms)3407 int rtw_blacklist_add(_queue *blist, const u8 *addr, u32 timeout_ms)
3408 {
3409 struct blacklist_ent *ent;
3410 _list *list, *head;
3411 u8 exist = _FALSE, timeout = _FALSE;
3412
3413 enter_critical_bh(&blist->lock);
3414
3415 head = &blist->queue;
3416 list = get_next(head);
3417 while (rtw_end_of_queue_search(head, list) == _FALSE) {
3418 ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3419 list = get_next(list);
3420
3421 if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
3422 exist = _TRUE;
3423 if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
3424 timeout = _TRUE;
3425 ent->exp_time = rtw_get_current_time()
3426 + rtw_ms_to_systime(timeout_ms);
3427 break;
3428 }
3429
3430 if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3431 rtw_list_delete(&ent->list);
3432 rtw_mfree(ent, sizeof(struct blacklist_ent));
3433 }
3434 }
3435
3436 if (exist == _FALSE) {
3437 ent = rtw_malloc(sizeof(struct blacklist_ent));
3438 if (ent) {
3439 _rtw_memcpy(ent->addr, addr, ETH_ALEN);
3440 ent->exp_time = rtw_get_current_time()
3441 + rtw_ms_to_systime(timeout_ms);
3442 rtw_list_insert_tail(&ent->list, head);
3443 }
3444 }
3445
3446 exit_critical_bh(&blist->lock);
3447
3448 return (exist == _TRUE && timeout == _FALSE) ? RTW_ALREADY : (ent ? _SUCCESS : _FAIL);
3449 }
3450
rtw_blacklist_del(_queue * blist,const u8 * addr)3451 int rtw_blacklist_del(_queue *blist, const u8 *addr)
3452 {
3453 struct blacklist_ent *ent = NULL;
3454 _list *list, *head;
3455 u8 exist = _FALSE;
3456
3457 enter_critical_bh(&blist->lock);
3458 head = &blist->queue;
3459 list = get_next(head);
3460 while (rtw_end_of_queue_search(head, list) == _FALSE) {
3461 ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3462 list = get_next(list);
3463
3464 if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
3465 rtw_list_delete(&ent->list);
3466 rtw_mfree(ent, sizeof(struct blacklist_ent));
3467 exist = _TRUE;
3468 break;
3469 }
3470
3471 if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3472 rtw_list_delete(&ent->list);
3473 rtw_mfree(ent, sizeof(struct blacklist_ent));
3474 }
3475 }
3476
3477 exit_critical_bh(&blist->lock);
3478
3479 return exist == _TRUE ? _SUCCESS : RTW_ALREADY;
3480 }
3481
rtw_blacklist_search(_queue * blist,const u8 * addr)3482 int rtw_blacklist_search(_queue *blist, const u8 *addr)
3483 {
3484 struct blacklist_ent *ent = NULL;
3485 _list *list, *head;
3486 u8 exist = _FALSE;
3487
3488 enter_critical_bh(&blist->lock);
3489 head = &blist->queue;
3490 list = get_next(head);
3491 while (rtw_end_of_queue_search(head, list) == _FALSE) {
3492 ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3493 list = get_next(list);
3494
3495 if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
3496 if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3497 rtw_list_delete(&ent->list);
3498 rtw_mfree(ent, sizeof(struct blacklist_ent));
3499 } else
3500 exist = _TRUE;
3501 break;
3502 }
3503
3504 if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
3505 rtw_list_delete(&ent->list);
3506 rtw_mfree(ent, sizeof(struct blacklist_ent));
3507 }
3508 }
3509
3510 exit_critical_bh(&blist->lock);
3511
3512 return exist;
3513 }
3514
rtw_blacklist_flush(_queue * blist)3515 void rtw_blacklist_flush(_queue *blist)
3516 {
3517 struct blacklist_ent *ent;
3518 _list *list, *head;
3519 _list tmp;
3520
3521 _rtw_init_listhead(&tmp);
3522
3523 enter_critical_bh(&blist->lock);
3524 rtw_list_splice_init(&blist->queue, &tmp);
3525 exit_critical_bh(&blist->lock);
3526
3527 head = &tmp;
3528 list = get_next(head);
3529 while (rtw_end_of_queue_search(head, list) == _FALSE) {
3530 ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3531 list = get_next(list);
3532 rtw_list_delete(&ent->list);
3533 rtw_mfree(ent, sizeof(struct blacklist_ent));
3534 }
3535 }
3536
dump_blacklist(void * sel,_queue * blist,const char * title)3537 void dump_blacklist(void *sel, _queue *blist, const char *title)
3538 {
3539 struct blacklist_ent *ent = NULL;
3540 _list *list, *head;
3541
3542 enter_critical_bh(&blist->lock);
3543 head = &blist->queue;
3544 list = get_next(head);
3545
3546 if (rtw_end_of_queue_search(head, list) == _FALSE) {
3547 if (title)
3548 RTW_PRINT_SEL(sel, "%s:\n", title);
3549
3550 while (rtw_end_of_queue_search(head, list) == _FALSE) {
3551 ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
3552 list = get_next(list);
3553
3554 if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
3555 RTW_PRINT_SEL(sel, MAC_FMT" expired\n", MAC_ARG(ent->addr));
3556 else
3557 RTW_PRINT_SEL(sel, MAC_FMT" %u\n", MAC_ARG(ent->addr)
3558 , rtw_get_remaining_time_ms(ent->exp_time));
3559 }
3560
3561 }
3562 exit_critical_bh(&blist->lock);
3563 }
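
/*
 * Usage sketch (illustrative, not compiled): ban a peer for 10 seconds, query
 * the entry and clear it. "blist" and "peer_addr" are hypothetical; only the
 * rtw_blacklist_* helpers above are from this file.
 */
#if 0
	rtw_blacklist_add(&blist, peer_addr, 10 * 1000);	/* timeout in ms */

	if (rtw_blacklist_search(&blist, peer_addr))
		RTW_INFO("peer is currently blacklisted\n");

	rtw_blacklist_del(&blist, peer_addr);
	rtw_blacklist_flush(&blist);	/* drop every remaining entry */
#endif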
3564 #endif
3565
3566 /**
3567 * is_null -
3568 *
3569 * Return TRUE if c is null character
3570 * FALSE otherwise.
3571 */
is_null(char c)3572 inline BOOLEAN is_null(char c)
3573 {
3574 if (c == '\0')
3575 return _TRUE;
3576 else
3577 return _FALSE;
3578 }
3579
is_all_null(char * c,int len)3580 inline BOOLEAN is_all_null(char *c, int len)
3581 {
3582 for (; len > 0; len--)
3583 if (c[len - 1] != '\0')
3584 return _FALSE;
3585
3586 return _TRUE;
3587 }
3588
3589 /**
3590 * is_eol -
3591 *
 * Return TRUE if c represents EOL (end of line),
3593 * FALSE otherwise.
3594 */
is_eol(char c)3595 inline BOOLEAN is_eol(char c)
3596 {
3597 if (c == '\r' || c == '\n')
3598 return _TRUE;
3599 else
3600 return _FALSE;
3601 }
3602
3603 /**
3604 * is_space -
3605 *
 * Return TRUE if c represents a space or tab character,
3607 * FALSE otherwise.
3608 */
is_space(char c)3609 inline BOOLEAN is_space(char c)
3610 {
3611 if (c == ' ' || c == '\t')
3612 return _TRUE;
3613 else
3614 return _FALSE;
3615 }
3616
3617 /**
3618 * is_decimal -
3619 *
 * Return TRUE if chTmp represents a decimal digit,
3621 * FALSE otherwise.
3622 */
is_decimal(char chTmp)3623 inline BOOLEAN is_decimal(char chTmp)
3624 {
3625 if ((chTmp >= '0' && chTmp <= '9'))
3626 return _TRUE;
3627 else
3628 return _FALSE;
3629 }
3630
3631 /**
3632 * IsHexDigit -
3633 *
 * Return TRUE if chTmp represents a hex digit,
3635 * FALSE otherwise.
3636 */
IsHexDigit(char chTmp)3637 inline BOOLEAN IsHexDigit(char chTmp)
3638 {
3639 if ((chTmp >= '0' && chTmp <= '9') ||
3640 (chTmp >= 'a' && chTmp <= 'f') ||
3641 (chTmp >= 'A' && chTmp <= 'F'))
3642 return _TRUE;
3643 else
3644 return _FALSE;
3645 }
3646
3647 /**
3648 * is_alpha -
3649 *
 * Return TRUE if chTmp represents an alphabetic character,
3651 * FALSE otherwise.
3652 */
is_alpha(char chTmp)3653 inline BOOLEAN is_alpha(char chTmp)
3654 {
3655 if ((chTmp >= 'a' && chTmp <= 'z') ||
3656 (chTmp >= 'A' && chTmp <= 'Z'))
3657 return _TRUE;
3658 else
3659 return _FALSE;
3660 }
3661
alpha_to_upper(char c)3662 inline char alpha_to_upper(char c)
3663 {
3664 if ((c >= 'a' && c <= 'z'))
3665 c = 'A' + (c - 'a');
3666 return c;
3667 }
3668
hex2num_i(char c)3669 int hex2num_i(char c)
3670 {
3671 if (c >= '0' && c <= '9')
3672 return c - '0';
3673 if (c >= 'a' && c <= 'f')
3674 return c - 'a' + 10;
3675 if (c >= 'A' && c <= 'F')
3676 return c - 'A' + 10;
3677 return -1;
3678 }
3679
hex2byte_i(const char * hex)3680 int hex2byte_i(const char *hex)
3681 {
3682 int a, b;
3683 a = hex2num_i(*hex++);
3684 if (a < 0)
3685 return -1;
3686 b = hex2num_i(*hex++);
3687 if (b < 0)
3688 return -1;
3689 return (a << 4) | b;
3690 }
3691
hexstr2bin(const char * hex,u8 * buf,size_t len)3692 int hexstr2bin(const char *hex, u8 *buf, size_t len)
3693 {
3694 size_t i;
3695 int a;
3696 const char *ipos = hex;
3697 u8 *opos = buf;
3698
3699 for (i = 0; i < len; i++) {
3700 a = hex2byte_i(ipos);
3701 if (a < 0)
3702 return -1;
3703 *opos++ = a;
3704 ipos += 2;
3705 }
3706 return 0;
3707 }
3708
3709 /**
3710 * hwaddr_aton - Convert ASCII string to MAC address
3711 * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
3712 * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
3713 * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
3714 */
hwaddr_aton_i(const char * txt,u8 * addr)3715 int hwaddr_aton_i(const char *txt, u8 *addr)
3716 {
3717 int i;
3718
3719 for (i = 0; i < 6; i++) {
3720 int a, b;
3721
3722 a = hex2num_i(*txt++);
3723 if (a < 0)
3724 return -1;
3725 b = hex2num_i(*txt++);
3726 if (b < 0)
3727 return -1;
3728 *addr++ = (a << 4) | b;
3729 if (i < 5 && *txt++ != ':')
3730 return -1;
3731 }
3732
3733 return 0;
3734 }
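
/*
 * Usage sketch (illustrative, not compiled): parse a MAC address string and a
 * short hex string. The literal values are made up; only hwaddr_aton_i() and
 * hexstr2bin() are from this file.
 */
#if 0
	u8 mac[ETH_ALEN];
	u8 key[4];

	if (hwaddr_aton_i("00:e0:4c:81:92:a3", mac) == 0)
		RTW_INFO("mac="MAC_FMT"\n", MAC_ARG(mac));

	if (hexstr2bin("0a1b2c3d", key, sizeof(key)) == 0)
		; /* key = {0x0a, 0x1b, 0x2c, 0x3d} */
#endif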
3735
3736