xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8822cs/os_dep/osdep_service.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2017 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 
16 
17 #define _OSDEP_SERVICE_C_
18 
19 #include <drv_types.h>
20 
21 #define RT_TAG	'1178'
22 
23 #ifdef DBG_MEMORY_LEAK
24 #ifdef PLATFORM_LINUX
25 atomic_t _malloc_cnt = ATOMIC_INIT(0);
26 atomic_t _malloc_size = ATOMIC_INIT(0);
27 #endif
28 #endif /* DBG_MEMORY_LEAK */
29 
30 
31 #ifdef DBG_MEM_ERR_FREE
32 
33 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
34 
35 #define DBG_MEM_HASHBITS 10
36 
37 #define DBG_MEM_TYPE_PHY 0
38 #define DBG_MEM_TYPE_VIR 1
39 
40 /*
41  * DBG_MEM_ERR_FREE is only for the debug purpose.
42  *
43  * There is the limitation that this mechanism only can
44  * support one wifi device, and has problem if there
45  * are two or more wifi devices with one driver on
46  * the same system. It's because dbg_mem_ht is global
47  * variable, and if we move this dbg_mem_ht into struct
48  * dvobj_priv to support more wifi devices, the memory
49  * allocation functions, like rtw_malloc(), need to have
50  * the parameter dvobj to get relative hash table, and
51  * then it is the huge changes for the driver currently.
52  *
53  */
/* Global hash table, keyed by allocation address, holding one entry per
 * live allocation while DBG_MEM_ERR_FREE is enabled (see the single-device
 * limitation described above). */
struct hlist_head dbg_mem_ht[1 << DBG_MEM_HASHBITS];

/* One tracked allocation record stored in dbg_mem_ht. */
struct hash_mem {
	void *mem;	/* allocation address; also the hash key */
	int sz;		/* size passed at allocation time */
	int type;	/* DBG_MEM_TYPE_PHY or DBG_MEM_TYPE_VIR */
	struct hlist_node node;
};
62 
63 #endif /* LINUX_VERSION_CODE */
64 
/* Initialize the DBG_MEM_ERR_FREE tracking hash table.
 * No-op on kernels older than 3.7 (no hashtable.h API there). */
void rtw_dbg_mem_init(void)
{
#if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
	hash_init(dbg_mem_ht);
#endif /* LINUX_VERSION_CODE */
}
71 
rtw_dbg_mem_deinit(void)72 void rtw_dbg_mem_deinit(void)
73 {
74 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
75 	struct hlist_head *head;
76 	struct hlist_node *p;
77 	int i;
78 
79 	for (i = 0; i < HASH_SIZE(dbg_mem_ht); i++) {
80 		head = &dbg_mem_ht[i];
81 		p = head->first;
82 		while (p) {
83 			struct hlist_node *prev;
84 			struct hash_mem *hm;
85 
86 			hm = container_of(p, struct hash_mem, node);
87 			prev = p;
88 			p = p->next;
89 
90 			RTW_ERR("%s: memory leak - 0x%x\n", __func__, hm->mem);
91 			hash_del(prev);
92 			kfree(hm);
93 		}
94 	}
95 #endif /* LINUX_VERSION_CODE */
96 }
97 
98 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
rtw_dbg_mem_find(void * mem)99 struct hash_mem *rtw_dbg_mem_find(void *mem)
100 {
101 	struct hash_mem *hm;
102 	struct hlist_head *head;
103 	struct hlist_node *p;
104 
105 	head = &dbg_mem_ht[hash_64((u64)(mem), DBG_MEM_HASHBITS)];
106 
107 	p = head->first;
108 	while (p) {
109 		hm = container_of(p, struct hash_mem, node);
110 		if (hm->mem == mem)
111 			goto out;
112 		p = p->next;
113 	}
114 	hm = NULL;
115 out:
116 	return hm;
117 }
118 
rtw_dbg_mem_alloc(void * mem,int sz,int type)119 void rtw_dbg_mem_alloc(void *mem, int sz, int type)
120 {
121 	struct hash_mem *hm;
122 
123 	hm = rtw_dbg_mem_find(mem);
124 	if (!hm) {
125 		hm = (struct hash_mem *)kmalloc(sizeof(*hm), GFP_ATOMIC);
126 		hm->mem = mem;
127 		hm->sz = sz;
128 		hm->type = type;
129 		hash_add(dbg_mem_ht, &hm->node, (u64)(mem));
130 	} else {
131 		RTW_ERR("%s mem(%x) is in hash already\n", __func__, mem);
132 		rtw_warn_on(1);
133 	}
134 }
135 
rtw_dbg_mem_free(void * mem,int sz,int type)136 bool rtw_dbg_mem_free(void *mem, int sz, int type)
137 {
138 	struct hash_mem *hm;
139 	bool ret;
140 
141 	hm = rtw_dbg_mem_find(mem);
142 	if (!hm) {
143 		RTW_ERR("%s cannot find allocated memory: %x\n",
144 			__func__, mem);
145 		rtw_warn_on(1);
146 		return false;
147 	}
148 
149 	if (hm->sz != sz) {
150 		RTW_ERR("%s memory (%x) size mismatch free(%d) != alloc(%d)\n",
151 			__func__, mem, sz, hm->sz);
152 		rtw_warn_on(1);
153 		ret = false;
154 		goto out;
155 	}
156 
157 	if (hm->type != type) {
158 		RTW_ERR("%s memory (%x) type mismatch free(%d) != alloc(%d)\n",
159 			__func__, mem, type, hm->type);
160 		rtw_warn_on(1);
161 		ret = false;
162 		goto out;
163 	}
164 	ret = true;
165 
166 out:
167 	hash_del(&hm->node);
168 	kfree(hm);
169 
170 	return ret;
171 }
172 
173 #endif /* LINUX_VERSION_CODE */
174 #endif /* DBG_MEM_ERR_FREE */
175 
176 #if defined(PLATFORM_LINUX)
177 /*
178 * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
179 * @return: one of RTW_STATUS_CODE
180 */
/*
 * Map a platform error code to the driver's status codes.
 * On Linux: any non-negative value is _SUCCESS; every negative errno
 * collapses to _FAIL (the per-errno mapping is stubbed out below).
 */
inline int RTW_STATUS_CODE(int error_code)
{
	if (error_code >= 0)
		return _SUCCESS;

	switch (error_code) {
	/* case -ETIMEDOUT: */
	/*	return RTW_STATUS_TIMEDOUT; */
	default:
		return _FAIL;
	}
}
#else
/* Non-Linux platforms: pass the error code through unchanged. */
inline int RTW_STATUS_CODE(int error_code)
{
	return error_code;
}
#endif
199 
rtw_atoi(u8 * s)200 u32 rtw_atoi(u8 *s)
201 {
202 
203 	int num = 0, flag = 0;
204 	int i;
205 	for (i = 0; i <= strlen(s); i++) {
206 		if (s[i] >= '0' && s[i] <= '9')
207 			num = num * 10 + s[i] - '0';
208 		else if (s[0] == '-' && i == 0)
209 			flag = 1;
210 		else
211 			break;
212 	}
213 
214 	if (flag == 1)
215 		num = num * -1;
216 
217 	return num;
218 
219 }
220 
/*
 * Allocate @sz bytes of virtually-contiguous memory (vmalloc on Linux).
 * Returns NULL on failure.  Successful allocations are registered with
 * the DBG_MEM_ERR_FREE tracker and counted by the DBG_MEMORY_LEAK
 * accounting when those options are enabled.
 */
inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf;
#ifdef PLATFORM_LINUX
	pbuf = vmalloc(sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif

#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
#endif

#ifdef DBG_MEM_ERR_FREE
	/* record so _rtw_vmfree() can validate size/type at free time */
	if (pbuf)
		rtw_dbg_mem_alloc(pbuf, sz, DBG_MEM_TYPE_VIR);
#endif /* DBG_MEM_ERR_FREE */

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}
251 
/*
 * Allocate @sz bytes of zeroed virtually-contiguous memory.
 * The Linux path delegates to _rtw_vmalloc() (so it inherits the
 * debug-tracking registration) and zeroes on success.
 * NOTE(review): the FreeBSD/Windows paths allocate directly and are not
 * registered with DBG_MEM_ERR_FREE, unlike the Linux path — confirm
 * whether those configurations are ever combined.
 */
inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf;
#ifdef PLATFORM_LINUX
	pbuf = _rtw_vmalloc(sz);
	if (pbuf != NULL)
		memset(pbuf, 0, sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
	if (pbuf != NULL)
		NdisFillMemory(pbuf, sz, 0);
#endif

	return pbuf;
}
271 
/*
 * Free memory obtained from _rtw_vmalloc()/_rtw_zvmalloc().
 * With DBG_MEM_ERR_FREE enabled, the free is first validated against
 * the tracking table; on a mismatch the function returns WITHOUT
 * freeing (a deliberate leak, to preserve evidence of the bad free).
 */
inline void _rtw_vmfree(void *pbuf, u32 sz)
{
#ifdef DBG_MEM_ERR_FREE
	if (!rtw_dbg_mem_free(pbuf, sz, DBG_MEM_TYPE_VIR))
		return;
#endif /* DBG_MEM_ERR_FREE */

#ifdef PLATFORM_LINUX
	vfree(pbuf);
#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS
	NdisFreeMemory(pbuf, sz, 0);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */
}
296 
/*
 * Allocate @sz bytes of physically-contiguous memory (kmalloc on Linux).
 * Safe from atomic context: GFP_ATOMIC is used when in_interrupt().
 * On RTK_DMP_PLATFORM, requests above 16 KiB go through dvr_malloc().
 * Returns NULL on failure.
 */
void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		pbuf = dvr_malloc(sz);
	else
#endif
		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);

#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);

#endif

#ifdef DBG_MEM_ERR_FREE
	/* record so _rtw_mfree() can validate size/type at free time */
	if (pbuf)
		rtw_dbg_mem_alloc(pbuf, sz, DBG_MEM_TYPE_PHY);
#endif /* DBG_MEM_ERR_FREE */

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;

}
336 
337 
/*
 * Allocate @sz bytes of zeroed physically-contiguous memory.
 * Delegates to _rtw_malloc() and zeroes on success (FreeBSD zeroes
 * directly via M_ZERO).  Returns NULL on failure.
 */
void *_rtw_zmalloc(u32 sz)
{
#ifdef PLATFORM_FREEBSD
	return malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#else /* PLATFORM_FREEBSD */
	void *pbuf = _rtw_malloc(sz);

	if (pbuf != NULL) {

#ifdef PLATFORM_LINUX
		memset(pbuf, 0, sz);
#endif

#ifdef PLATFORM_WINDOWS
		NdisFillMemory(pbuf, sz, 0);
#endif
	}

	return pbuf;
#endif /* PLATFORM_FREEBSD */
}
359 
/*
 * Free memory obtained from _rtw_malloc()/_rtw_zmalloc().
 * @sz must match the allocation size (checked when DBG_MEM_ERR_FREE is
 * on; a mismatch aborts the free, deliberately leaking for diagnosis).
 * On RTK_DMP_PLATFORM the >16 KiB dvr_malloc() path is mirrored here.
 */
void _rtw_mfree(void *pbuf, u32 sz)
{

#ifdef DBG_MEM_ERR_FREE
	if (!rtw_dbg_mem_free(pbuf, sz, DBG_MEM_TYPE_PHY))
		return;
#endif /* DBG_MEM_ERR_FREE */

#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	if (sz > 0x4000)
		dvr_free(pbuf);
	else
#endif
		kfree(pbuf);

#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS

	NdisFreeMemory(pbuf, sz, 0);

#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */

}
394 
395 #ifdef PLATFORM_FREEBSD
396 /* review again */
/* review again */
/*
 * Minimal FreeBSD stand-in for the Linux dev_alloc_skb().
 * Allocates the sk_buff struct and a @size-byte data buffer, wiring
 * head/data/tail/end to the buffer and len to 0.
 * NOTE(review): _rtw_malloc (not _rtw_zmalloc) is used, so any sk_buff
 * field not set below is uninitialized — confirm callers rely only on
 * the fields initialized here.
 */
struct sk_buff *dev_alloc_skb(unsigned int size)
{
	struct sk_buff *skb = NULL;
	u8 *data = NULL;

	/* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */
	skb = _rtw_malloc(sizeof(struct sk_buff));
	if (!skb)
		goto out;
	data = _rtw_malloc(size);
	if (!data)
		goto nodata;

	skb->head = (unsigned char *)data;
	skb->data = (unsigned char *)data;
	skb->tail = (unsigned char *)data;
	skb->end = (unsigned char *)data + size;
	skb->len = 0;
	/* printf("%s()-%d: skb=%p, skb->head = %p\n", __FUNCTION__, __LINE__, skb, skb->head); */

out:
	return skb;
nodata:
	/* data allocation failed: release the sk_buff and report failure */
	_rtw_mfree(skb, sizeof(struct sk_buff));
	skb = NULL;
	goto out;

}
425 
dev_kfree_skb_any(struct sk_buff * skb)426 void dev_kfree_skb_any(struct sk_buff *skb)
427 {
428 	/* printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head); */
429 	if (skb->head)
430 		_rtw_mfree(skb->head, 0);
431 	/* printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb); */
432 	if (skb)
433 		_rtw_mfree(skb, 0);
434 }
/* Stub: skb cloning is not implemented on FreeBSD; always returns NULL. */
struct sk_buff *skb_clone(const struct sk_buff *skb)
{
	return NULL;
}
439 
440 #endif /* PLATFORM_FREEBSD */
441 
442 #ifdef CONFIG_PCIE_DMA_COHERENT
/*
 * Allocate an sk_buff whose data buffer comes from the PCI DMA-coherent
 * pool.  The DMA handle is stashed in skb->cb (cast to dma_addr_t *),
 * presumably retrieved by the caller for device programming — verify
 * against the CONFIG_PCIE_DMA_COHERENT users.
 * Returns NULL when either allocation fails.
 */
struct sk_buff *dev_alloc_skb_coherent(struct pci_dev *pdev, unsigned int size)
{
	struct sk_buff *skb = NULL;
	unsigned char *data = NULL;

	/* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */

	skb = _rtw_malloc(sizeof(struct sk_buff));
	if (!skb)
		goto out;

	data = dma_alloc_coherent(&pdev->dev, size, (dma_addr_t *)&skb->cb, GFP_KERNEL);

	if (!data)
		goto nodata;

	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->len = 0;
out:
	return skb;
nodata:
	/* coherent allocation failed: release the sk_buff shell */
	_rtw_mfree(skb, sizeof(struct sk_buff));
	skb = NULL;
	goto out;

}
472 #endif
473 
/*
 * Allocate an sk_buff with @sz bytes of data room.
 * Atomic-context safe on Linux (GFP_ATOMIC when in_interrupt()).
 */
inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
#ifdef PLATFORM_LINUX
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return dev_alloc_skb(sz);
#endif /* PLATFORM_FREEBSD */
}
484 
/* Free an sk_buff; safe from any context (hard/soft IRQ or process). */
inline void _rtw_skb_free(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
489 
/*
 * Deep-copy an sk_buff (header and data).  Returns NULL on failure,
 * and always NULL on FreeBSD (not implemented there).
 */
inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}
500 
/*
 * Clone an sk_buff (shared data, separate header).  Returns NULL on
 * failure; the FreeBSD skb_clone() stub above always returns NULL.
 */
inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return skb_clone(skb);
#endif /* PLATFORM_FREEBSD */
}
/*
 * Copy an sk_buff with a private header area (pskb_copy); on kernels
 * older than 2.6.36 this falls back to skb_clone().  NULL on failure.
 */
inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}
525 
/*
 * Hand a received frame to the network stack.  Ownership of @skb passes
 * to the stack on Linux (netif_rx consumes it regardless of result).
 */
inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
	return netif_rx(skb);
#elif defined(PLATFORM_FREEBSD)
	return (*ndev->if_input)(ndev, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
538 
539 #ifdef CONFIG_RTW_NAPI
/*
 * NAPI path: hand a received frame directly to the protocol layers via
 * netif_receive_skb().  Linux only; other platforms warn and fail.
 */
inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
	return netif_receive_skb(skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
550 
551 #ifdef CONFIG_RTW_GRO
/*
 * GRO path: feed a received frame into napi_gro_receive() for receive
 * offload aggregation.  Linux only; other platforms warn and fail.
 */
inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	return napi_gro_receive(napi, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
561 #endif /* CONFIG_RTW_GRO */
562 #endif /* CONFIG_RTW_NAPI */
563 
_rtw_skb_queue_purge(struct sk_buff_head * list)564 void _rtw_skb_queue_purge(struct sk_buff_head *list)
565 {
566 	struct sk_buff *skb;
567 
568 	while ((skb = skb_dequeue(list)) != NULL)
569 		_rtw_skb_free(skb);
570 }
571 
572 #ifdef CONFIG_USB_HCI
/*
 * Allocate a DMA-consistent USB transfer buffer; the bus address is
 * returned through @dma.  Uses the pre-2.6.35 usb_buffer_alloc() name
 * on older kernels.  NULL on failure.
 */
inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#else
	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return malloc(size, M_USBDEV, M_NOWAIT | M_ZERO);
#endif /* PLATFORM_FREEBSD */
}
/*
 * Free a buffer from _rtw_usb_buffer_alloc(); @size/@dma must match the
 * allocation.  Uses the pre-2.6.35 usb_buffer_free() name on older kernels.
 */
inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	usb_free_coherent(dev, size, addr, dma);
#else
	usb_buffer_free(dev, size, addr, dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	free(addr, M_USBDEV);
#endif /* PLATFORM_FREEBSD */
}
601 #endif /* CONFIG_USB_HCI */
602 
603 #if defined(DBG_MEM_ALLOC)
604 
/* Counters for one memory category tracked by the MSTAT machinery. */
struct rtw_mem_stat {
	ATOMIC_T alloc; /* the memory bytes we allocate currently */
	ATOMIC_T peak; /* the peak memory bytes we allocate */
	ATOMIC_T alloc_cnt; /* the alloc count for alloc currently */
	ATOMIC_T alloc_err_cnt; /* the error times we fail to allocate memory */
};

/* per-type counters, indexed by mstat_tf_idx(flags) */
struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
#ifdef RTW_MEM_FUNC_STAT
/* per-function-area counters, indexed by mstat_ff_idx(flags) */
struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
#endif

/* row labels used by rtw_mstat_dump(); indexed in step with
 * rtw_mem_type_stat[] */
char *MSTAT_TYPE_str[] = {
	"VIR",
	"PHY",
	"SKB",
	"USB",
};

#ifdef RTW_MEM_FUNC_STAT
/* row labels indexed in step with rtw_mem_func_stat[] */
char *MSTAT_FUNC_str[] = {
	"UNSP",
	"IO",
	"TXIO",
	"RXIO",
	"TX",
	"RX",
};
#endif
634 
/*
 * Print the MSTAT tables to @sel (a print-selector handle).
 * Counters are snapshotted into local arrays first so one dump shows a
 * (mostly) consistent view even while allocations continue.
 */
void rtw_mstat_dump(void *sel)
{
	int i;
	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
#ifdef RTW_MEM_FUNC_STAT
	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
#endif

	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
	}

#ifdef RTW_MEM_FUNC_STAT
	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
	}
#endif

	RTW_PRINT_SEL(sel, "===================== MSTAT =====================\n");
	RTW_PRINT_SEL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "aloc_cnt", "err_cnt");
	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++)
		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
#ifdef RTW_MEM_FUNC_STAT
	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++)
		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
#endif
}
670 
/*
 * Update the MSTAT counters for one allocation event.
 * @flags selects the type row (and function row when RTW_MEM_FUNC_STAT);
 * @status says whether an alloc succeeded, failed, or memory was freed;
 * @sz is the byte count involved.
 *
 * All counters are zeroed lazily on the first call (update_time == 0).
 * NOTE(review): that lazy init is not synchronized — two first callers
 * racing could each run the reset loop; harmless for zeroing but worth
 * confirming if init is ever made non-trivial.
 *
 * Only change from the original: the stray ';' after the switch's
 * closing brace (an empty statement flagged by -Wpedantic) is removed.
 */
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static systime update_time = 0;
	int peak, alloc;
	int i;

	/* initialization */
	if (!update_time) {
		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
		#ifdef RTW_MEM_FUNC_STAT
		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
		#endif
	}

	switch (status) {
	case MSTAT_ALLOC_SUCCESS:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
		/* track the high-water mark (racy read-then-set; debug only) */
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
		#endif
		break;

	case MSTAT_ALLOC_FAIL:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
		#endif
		break;

	case MSTAT_FREE:
		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		#endif
		break;
	}

	/* if (rtw_get_passing_time_ms(update_time) > 5000) { */
	/*	rtw_mstat_dump(RTW_DBGDUMP); */
	update_time = rtw_get_current_time();
	/* } */
}
734 
#ifndef SIZE_MAX
	#define SIZE_MAX (~(size_t)0)
#endif

/* One sniff rule: log allocations whose flags match exactly and whose
 * size falls within [lb, hb]. */
struct mstat_sniff_rule {
	enum mstat_f flags;
	size_t lb;
	size_t hb;
};

/* Default rule set: report PHY (kmalloc-class) allocations larger than
 * 4096 bytes. */
struct mstat_sniff_rule mstat_sniff_rules[] = {
	{MSTAT_TYPE_PHY, 4097, SIZE_MAX},
};

int mstat_sniff_rule_num = sizeof(mstat_sniff_rules) / sizeof(struct mstat_sniff_rule);
750 
match_mstat_sniff_rules(const enum mstat_f flags,const size_t size)751 bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
752 {
753 	int i;
754 	for (i = 0; i < mstat_sniff_rule_num; i++) {
755 		if (mstat_sniff_rules[i].flags == flags
756 			&& mstat_sniff_rules[i].lb <= size
757 			&& mstat_sniff_rules[i].hb >= size)
758 			return _TRUE;
759 	}
760 
761 	return _FALSE;
762 }
763 
/* Instrumented _rtw_vmalloc(): logs sniff-rule hits and records the
 * outcome in the MSTAT counters.  @func/@line identify the call site. */
inline void *dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_vmalloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
781 
/* Instrumented _rtw_zvmalloc(): logs sniff-rule hits and records the
 * outcome in the MSTAT counters.  @func/@line identify the call site. */
inline void *dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_zvmalloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
799 
/* Instrumented _rtw_vmfree(): logs sniff-rule hits, frees, then books
 * the release in the MSTAT counters. */
inline void dbg_rtw_vmfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_vmfree(pbuf, sz);
	rtw_mstat_update(flags, MSTAT_FREE, sz);
}
814 
/* Instrumented _rtw_malloc(): logs sniff-rule hits and records the
 * outcome in the MSTAT counters.  @func/@line identify the call site. */
inline void *dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_malloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
832 
/* Instrumented _rtw_zmalloc(): logs sniff-rule hits and records the
 * outcome in the MSTAT counters.  @func/@line identify the call site. */
inline void *dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_zmalloc(sz);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
850 
/* Instrumented _rtw_mfree(): logs sniff-rule hits, frees, then books
 * the release in the MSTAT counters. */
inline void dbg_rtw_mfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_mfree(pbuf, sz);
	rtw_mstat_update(flags, MSTAT_FREE, sz);
}
864 
dbg_rtw_skb_alloc(unsigned int size,const enum mstat_f flags,const char * func,int line)865 inline struct sk_buff *dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
866 {
867 	struct sk_buff *skb;
868 	unsigned int truesize = 0;
869 
870 	skb = _rtw_skb_alloc(size);
871 
872 	if (skb)
873 		truesize = skb->truesize;
874 
875 	if (!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
876 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);
877 
878 	rtw_mstat_update(
879 		flags
880 		, skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
881 		, truesize
882 	);
883 
884 	return skb;
885 }
886 
dbg_rtw_skb_free(struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)887 inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
888 {
889 	unsigned int truesize = skb->truesize;
890 
891 	if (match_mstat_sniff_rules(flags, truesize))
892 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
893 
894 	_rtw_skb_free(skb);
895 
896 	rtw_mstat_update(
897 		flags
898 		, MSTAT_FREE
899 		, truesize
900 	);
901 }
902 
dbg_rtw_skb_copy(const struct sk_buff * skb,const enum mstat_f flags,const char * func,const int line)903 inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
904 {
905 	struct sk_buff *skb_cp;
906 	unsigned int truesize = skb->truesize;
907 	unsigned int cp_truesize = 0;
908 
909 	skb_cp = _rtw_skb_copy(skb);
910 	if (skb_cp)
911 		cp_truesize = skb_cp->truesize;
912 
913 	if (!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
914 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);
915 
916 	rtw_mstat_update(
917 		flags
918 		, skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
919 		, cp_truesize
920 	);
921 
922 	return skb_cp;
923 }
924 
dbg_rtw_skb_clone(struct sk_buff * skb,const enum mstat_f flags,const char * func,const int line)925 inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
926 {
927 	struct sk_buff *skb_cl;
928 	unsigned int truesize = skb->truesize;
929 	unsigned int cl_truesize = 0;
930 
931 	skb_cl = _rtw_skb_clone(skb);
932 	if (skb_cl)
933 		cl_truesize = skb_cl->truesize;
934 
935 	if (!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
936 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);
937 
938 	rtw_mstat_update(
939 		flags
940 		, skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
941 		, cl_truesize
942 	);
943 
944 	return skb_cl;
945 }
946 
/* Instrumented _rtw_netif_rx(): the stack consumes the skb, so its
 * truesize is booked as a free in MSTAT (snapshotted beforehand). */
inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	const unsigned int footprint = skb->truesize;
	int rc;

	if (match_mstat_sniff_rules(flags, footprint))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, footprint);

	rc = _rtw_netif_rx(ndev, skb);
	rtw_mstat_update(flags, MSTAT_FREE, footprint);

	return rc;
}
965 
966 #ifdef CONFIG_RTW_NAPI
/* Instrumented _rtw_netif_receive_skb(): the stack consumes the skb, so
 * its truesize is booked as a free in MSTAT (snapshotted beforehand). */
inline int dbg_rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	const unsigned int footprint = skb->truesize;
	int rc;

	if (match_mstat_sniff_rules(flags, footprint))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, footprint);

	rc = _rtw_netif_receive_skb(ndev, skb);
	rtw_mstat_update(flags, MSTAT_FREE, footprint);

	return rc;
}
985 
986 #ifdef CONFIG_RTW_GRO
dbg_rtw_napi_gro_receive(struct napi_struct * napi,struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)987 inline gro_result_t dbg_rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
988 {
989 	int ret;
990 	unsigned int truesize = skb->truesize;
991 
992 	if (match_mstat_sniff_rules(flags, truesize))
993 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
994 
995 	ret = _rtw_napi_gro_receive(napi, skb);
996 
997 	rtw_mstat_update(
998 		flags
999 		, MSTAT_FREE
1000 		, truesize
1001 	);
1002 
1003 	return ret;
1004 }
1005 #endif /* CONFIG_RTW_GRO */
1006 #endif /* CONFIG_RTW_NAPI */
1007 
dbg_rtw_skb_queue_purge(struct sk_buff_head * list,enum mstat_f flags,const char * func,int line)1008 inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
1009 {
1010 	struct sk_buff *skb;
1011 
1012 	while ((skb = skb_dequeue(list)) != NULL)
1013 		dbg_rtw_skb_free(skb, flags, func, line);
1014 }
1015 
1016 #ifdef CONFIG_USB_HCI
/* Instrumented _rtw_usb_buffer_alloc(): logs sniff-rule hits and
 * records the outcome in the MSTAT counters. */
inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	ptr = _rtw_usb_buffer_alloc(dev, size, dma);
	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, size);

	return ptr;
}
1034 
/* Instrumented _rtw_usb_buffer_free(): logs sniff-rule hits, frees,
 * then books the release in the MSTAT counters. */
inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
{
	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	_rtw_usb_buffer_free(dev, size, addr, dma);
	rtw_mstat_update(flags, MSTAT_FREE, size);
}
1049 #endif /* CONFIG_USB_HCI */
1050 
1051 #endif /* defined(DBG_MEM_ALLOC) */
1052 
/*
 * Allocate a 2-D array as one zeroed block: an h-entry row-pointer
 * table followed by h*w*size bytes of payload; a[j] points at row j.
 * Free the result with rtw_mfree2d().
 *
 * Fix: the original computed h*sizeof(void *) + h*w*size unchecked, so
 * large or negative dimensions could overflow (and the total silently
 * truncates to the allocator's u32 parameter).  All such inputs now
 * fail cleanly with NULL.
 */
void *rtw_malloc2d(int h, int w, size_t size)
{
	size_t hdr, row, total;
	void **a;
	int j;

	/* reject degenerate dimensions and any overflow in the size math */
	if (h <= 0 || w <= 0)
		return NULL;
	if ((size_t)h > (~(size_t)0) / sizeof(void *))
		return NULL;
	hdr = (size_t)h * sizeof(void *);
	row = (size_t)w * size;
	if (size != 0 && row / size != (size_t)w)
		return NULL;
	if (row != 0 && (size_t)h > ((~(size_t)0) - hdr) / row)
		return NULL;
	total = hdr + (size_t)h * row;
	/* rtw_zmalloc() takes a u32 size; refuse anything that truncates */
	if (total != (u32)total)
		return NULL;

	a = (void **)rtw_zmalloc(total);
	if (a == NULL) {
		RTW_INFO("%s: alloc memory fail!\n", __FUNCTION__);
		return NULL;
	}

	/* point each row at its slice of the payload area */
	for (j = 0; j < h; j++)
		a[j] = ((char *)(a + h)) + j * w * size;

	return a;
}
1068 
/* Release a 2-D array allocated by rtw_malloc2d() (a single flat block);
 * @h/@w/@size must match the values passed at allocation. */
void rtw_mfree2d(void *pbuf, int h, int w, int size)
{
	rtw_mfree((u8 *)pbuf, h * sizeof(void *) + w * h * size);
}
1073 
/* Free an OS packet (sk_buff on Linux, mbuf on FreeBSD). */
inline void rtw_os_pkt_free(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	rtw_skb_free(pkt);
#elif defined(PLATFORM_FREEBSD)
	m_freem(pkt);
#else
	#error "TBD\n"
#endif
}
1084 
/* Duplicate a platform network packet; returns NULL on allocation failure. */
inline _pkt *rtw_os_pkt_copy(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	return rtw_skb_copy(pkt);
#elif defined(PLATFORM_FREEBSD)
	return m_dup(pkt, M_NOWAIT);
#else
	#error "TBD\n"
#endif
}
1095 
/* Return a pointer to the packet's payload data. */
inline void *rtw_os_pkt_data(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	return pkt->data;
#elif defined(PLATFORM_FREEBSD)
	return pkt->m_data;
#else
	#error "TBD\n"
#endif
}
1106 
/* Return the packet's payload length in bytes. */
inline u32 rtw_os_pkt_len(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	return pkt->len;
#elif defined(PLATFORM_FREEBSD)
	return pkt->m_pkthdr.len;
#else
	#error "TBD\n"
#endif
}
1117 
/* Platform-independent memcpy(); src and dst must not overlap (use
 * _rtw_memmove() for overlapping regions). */
void _rtw_memcpy(void *dst, const void *src, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)

	memcpy(dst, src, sz);

#endif

#ifdef PLATFORM_WINDOWS

	NdisMoveMemory(dst, src, sz);

#endif

}
1134 
/* Platform-independent memmove(); safe for overlapping regions. */
inline void _rtw_memmove(void *dst, const void *src, u32 sz)
{
#if defined(PLATFORM_LINUX)
	memmove(dst, src, sz);
#else
	#error "TBD\n"
#endif
}
1143 
/* Equality check (NOT memcmp-style ordering): returns _TRUE when the two
 * regions are byte-identical, _FALSE otherwise. */
int	_rtw_memcmp(const void *dst, const void *src, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
	/* under Linux/GNU/GLibc, the return value of memcmp for two same mem. chunk is 0 */

	if (!(memcmp(dst, src, sz)))
		return _TRUE;
	else
		return _FALSE;
#endif


#ifdef PLATFORM_WINDOWS
	/* under Windows, the return value of NdisEqualMemory for two same mem. chunk is 1 */

	if (NdisEqualMemory(dst, src, sz))
		return _TRUE;
	else
		return _FALSE;

#endif



}
1170 
/* memcmp-style three-way compare: 0 when equal (or sz == 0), otherwise the
 * difference of the first pair of differing bytes. */
int _rtw_memcmp2(const void *dst, const void *src, u32 sz)
{
	const unsigned char *a = dst;
	const unsigned char *b = src;
	u32 i;

	for (i = 0; i < sz; i++) {
		if (a[i] != b[i])
			return a[i] - b[i];
	}

	return 0;
}
1188 
/* Platform-independent memset(): fill sz bytes of pbuf with byte value c. */
void _rtw_memset(void *pbuf, int c, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)

	memset(pbuf, c, sz);

#endif

#ifdef PLATFORM_WINDOWS
#if 0
	NdisZeroMemory(pbuf, sz);
	if (c != 0)
		memset(pbuf, c, sz);
#else
	NdisFillMemory(pbuf, sz, c);
#endif
#endif

}
1209 
#ifdef PLATFORM_FREEBSD
/* FreeBSD-only helper: insert pnew between pprev and pnext (mirrors the
 * Linux __list_add() primitive). */
static inline void __list_add(_list *pnew, _list *pprev, _list *pnext)
{
	pnext->prev = pnew;
	pnew->next = pnext;
	pnew->prev = pprev;
	pprev->next = pnew;
}
#endif /* PLATFORM_FREEBSD */
1219 
1220 
/* Initialize a list head to the empty (self-linked) state. */
void _rtw_init_listhead(_list *list)
{

#ifdef PLATFORM_LINUX

	INIT_LIST_HEAD(list);

#endif

#ifdef PLATFORM_FREEBSD
	list->next = list;
	list->prev = list;
#endif
#ifdef PLATFORM_WINDOWS

	NdisInitializeListHead(list);

#endif

}
1241 
1242 
/*
For the following list_xxx operations,
the caller must guarantee atomic context;
otherwise there will be a race condition.
*/
/* Return _TRUE when the list headed by phead has no entries. */
u32	rtw_is_list_empty(_list *phead)
{

#ifdef PLATFORM_LINUX

	if (list_empty(phead))
		return _TRUE;
	else
		return _FALSE;

#endif
#ifdef PLATFORM_FREEBSD

	if (phead->next == phead)
		return _TRUE;
	else
		return _FALSE;

#endif


#ifdef PLATFORM_WINDOWS

	if (IsListEmpty(phead))
		return _TRUE;
	else
		return _FALSE;

#endif


}
1280 
/* Insert plist immediately after phead (front of the list). */
void rtw_list_insert_head(_list *plist, _list *phead)
{

#ifdef PLATFORM_LINUX
	list_add(plist, phead);
#endif

#ifdef PLATFORM_FREEBSD
	__list_add(plist, phead, phead->next);
#endif

#ifdef PLATFORM_WINDOWS
	InsertHeadList(phead, plist);
#endif
}
1296 
/* Insert plist immediately before phead (end of the list). */
void rtw_list_insert_tail(_list *plist, _list *phead)
{

#ifdef PLATFORM_LINUX

	list_add_tail(plist, phead);

#endif
#ifdef PLATFORM_FREEBSD

	__list_add(plist, phead->prev, phead);

#endif
#ifdef PLATFORM_WINDOWS

	InsertTailList(phead, plist);

#endif

}
1317 
/* Join 'list' onto the front of 'head'; 'list' itself is left untouched
 * (reinitialize it before reuse, or use rtw_list_splice_init()). */
inline void rtw_list_splice(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice(list, head);
#else
	#error "TBD\n"
#endif
}
1326 
/* Join 'list' onto the front of 'head' and reinitialize 'list' to empty. */
inline void rtw_list_splice_init(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice_init(list, head);
#else
	#error "TBD\n"
#endif
}
1335 
/* Join 'list' onto the tail of 'head' (list_splice_tail() exists only on
 * kernels >= 2.6.27; older kernels fall back to __list_splice()). */
inline void rtw_list_splice_tail(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
	if (!list_empty(list))
		__list_splice(list, head);
	#else
	list_splice_tail(list, head);
	#endif
#else
	#error "TBD\n"
#endif
}
1349 
/* Initialize a hash-list head to empty. */
inline void rtw_hlist_head_init(rtw_hlist_head *h)
{
#ifdef PLATFORM_LINUX
	INIT_HLIST_HEAD(h);
#else
	#error "TBD\n"
#endif
}
1358 
/* Add node n at the head of hash list h. */
inline void rtw_hlist_add_head(rtw_hlist_node *n, rtw_hlist_head *h)
{
#ifdef PLATFORM_LINUX
	hlist_add_head(n, h);
#else
	#error "TBD\n"
#endif
}
1367 
/* Unlink node n from its hash list. */
inline void rtw_hlist_del(rtw_hlist_node *n)
{
#ifdef PLATFORM_LINUX
	hlist_del(n);
#else
	#error "TBD\n"
#endif
}
1376 
/* RCU-safe variant of rtw_hlist_add_head(); readers may traverse
 * concurrently under rcu_read_lock(). */
inline void rtw_hlist_add_head_rcu(rtw_hlist_node *n, rtw_hlist_head *h)
{
#ifdef PLATFORM_LINUX
	hlist_add_head_rcu(n, h);
#else
	#error "TBD\n"
#endif
}
1385 
/* RCU-safe unlink; caller must wait a grace period before freeing n. */
inline void rtw_hlist_del_rcu(rtw_hlist_node *n)
{
#ifdef PLATFORM_LINUX
	hlist_del_rcu(n);
#else
	#error "TBD\n"
#endif
}
1394 
/* Bind a driver timer to the adapter's per-platform handle.  pfunc is the
 * expiry callback and ctx its argument; the exact callback signature is
 * defined by the platform _init_timer() implementation. */
void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc, void *ctx)
{
	_adapter *adapter = (_adapter *)padapter;

#ifdef PLATFORM_LINUX
	_init_timer(ptimer, adapter->pnetdev, pfunc, ctx);
#endif
#ifdef PLATFORM_FREEBSD
	_init_timer(ptimer, adapter->pifp, pfunc, ctx);
#endif
#ifdef PLATFORM_WINDOWS
	_init_timer(ptimer, adapter->hndis_adapter, pfunc, ctx);
#endif
}
1409 
1410 /*
1411 
1412 Caller must check if the list is empty before calling rtw_list_delete
1413 
1414 */
1415 
1416 
/* Initialize a counting semaphore with initial count init_val. */
void _rtw_init_sema(_sema	*sema, int init_val)
{

#ifdef PLATFORM_LINUX

	sema_init(sema, init_val);

#endif
#ifdef PLATFORM_FREEBSD
	sema_init(sema, init_val, "rtw_drv");
#endif
#ifdef PLATFORM_OS_XP

	KeInitializeSemaphore(sema, init_val,  SEMA_UPBND); /* count=0; */

#endif

#ifdef PLATFORM_OS_CE
	if (*sema == NULL)
		*sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL);
#endif

}
1440 
/* Destroy a semaphore; a no-op on Linux/XP where sema_init() allocates
 * nothing that needs explicit teardown. */
void _rtw_free_sema(_sema	*sema)
{
#ifdef PLATFORM_FREEBSD
	sema_destroy(sema);
#endif
#ifdef PLATFORM_OS_CE
	CloseHandle(*sema);
#endif

}
1451 
/* Release (post/up) the semaphore, waking one waiter if any. */
void _rtw_up_sema(_sema	*sema)
{

#ifdef PLATFORM_LINUX

	up(sema);

#endif
#ifdef PLATFORM_FREEBSD
	sema_post(sema);
#endif
#ifdef PLATFORM_OS_XP

	KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1,  FALSE);

#endif

#ifdef PLATFORM_OS_CE
	ReleaseSemaphore(*sema,  1,  NULL);
#endif
}
1473 
/* Acquire (wait/down) the semaphore.  On Linux the wait is killable:
 * returns _FAIL if interrupted by a fatal signal, _SUCCESS otherwise. */
u32 _rtw_down_sema(_sema *sema)
{

#ifdef PLATFORM_LINUX

	if (down_killable(sema))
		return _FAIL;
	else
		return _SUCCESS;

#endif
#ifdef PLATFORM_FREEBSD
	sema_wait(sema);
	return  _SUCCESS;
#endif
#ifdef PLATFORM_OS_XP

	if (STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL))
		return  _SUCCESS;
	else
		return _FAIL;
#endif

#ifdef PLATFORM_OS_CE
	if (WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE))
		return _SUCCESS;
	else
		return _FAIL;
#endif
}
1504 
/* Terminate the calling kernel thread after signalling 'comp' so the
 * spawner's wait completes.
 * NOTE(review): complete_and_exit() was renamed kthread_complete_and_exit()
 * in Linux 5.17 — confirm against the target kernel range. */
inline void thread_exit(_completion *comp)
{
#ifdef PLATFORM_LINUX
	complete_and_exit(comp, 0);
#endif

#ifdef PLATFORM_FREEBSD
	printf("%s", "RTKTHREAD_exit");
#endif

#ifdef PLATFORM_OS_CE
	ExitThread(STATUS_SUCCESS);
#endif

#ifdef PLATFORM_OS_XP
	PsTerminateSystemThread(STATUS_SUCCESS);
#endif
}
1523 
/* Initialize a completion object (Linux only). */
inline void _rtw_init_completion(_completion *comp)
{
#ifdef PLATFORM_LINUX
	init_completion(comp);
#endif
}
/* Wait for the completion with a fixed 3-second cap; the timeout result is
 * deliberately ignored (best-effort wait). */
inline void _rtw_wait_for_comp_timeout(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion_timeout(comp, msecs_to_jiffies(3000));
#endif
}
/* Wait (uninterruptibly, no timeout) for the completion to be signalled. */
inline void _rtw_wait_for_comp(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion(comp);
#endif
}
1542 
/* Initialize a mutex; pre-2.6.37 Linux kernels used init_MUTEX(). */
void	_rtw_mutex_init(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_init(pmutex);
#else
	init_MUTEX(pmutex);
#endif

#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(pmutex, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_OS_XP

	KeInitializeMutex(pmutex, 0);

#endif

#ifdef PLATFORM_OS_CE
	*pmutex =  CreateMutex(NULL, _FALSE, NULL);
#endif
}
1567 
void	_rtw_mutex_free(_mutex *pmutex);
/*
 * Destroy a mutex initialized by _rtw_mutex_init().
 *
 * Fix: the PLATFORM_FREEBSD branch was nested inside the PLATFORM_LINUX
 * block, so it could never be compiled on FreeBSD; the #ifdef sections are
 * now siblings, matching every other function in this file.
 * NOTE(review): _rtw_mutex_init() uses mtx_init() on FreeBSD while this
 * calls sema_destroy() — looks inconsistent; confirm on a FreeBSD build.
 */
void	_rtw_mutex_free(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#else
#endif

#endif

#ifdef PLATFORM_FREEBSD
	sema_destroy(pmutex);
#endif

#ifdef PLATFORM_OS_XP

#endif

#ifdef PLATFORM_OS_CE

#endif
}
1592 
/* Initialize a spinlock to the unlocked state. */
void	_rtw_spinlock_init(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_lock_init(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(plock, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAllocateSpinLock(plock);

#endif

}
1611 
/* Destroy a spinlock; a no-op on Linux where spin_lock_init() allocates
 * no resources. */
void	_rtw_spinlock_free(_lock *plock)
{
#ifdef PLATFORM_FREEBSD
	mtx_destroy(plock);
#endif

#ifdef PLATFORM_WINDOWS

	NdisFreeSpinLock(plock);

#endif

}
#ifdef PLATFORM_FREEBSD
extern PADAPTER prtw_lock;

/* Lock the single global FreeBSD driver mutex; 'plock' is ignored. */
void rtw_mtx_lock(_lock *plock)
{
	if (prtw_lock)
		mtx_lock(&prtw_lock->glock);
	else
		printf("%s prtw_lock==NULL", __FUNCTION__);
}
/* Unlock the single global FreeBSD driver mutex; 'plock' is ignored. */
void rtw_mtx_unlock(_lock *plock)
{
	if (prtw_lock)
		mtx_unlock(&prtw_lock->glock);
	else
		printf("%s prtw_lock==NULL", __FUNCTION__);

}
#endif /* PLATFORM_FREEBSD */
1644 
1645 
/* Acquire the spinlock (non-IRQ-safe variant). */
void	_rtw_spinlock(_lock	*plock)
{

#ifdef PLATFORM_LINUX

	spin_lock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_lock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAcquireSpinLock(plock);

#endif

}
1664 
/* Release a spinlock taken with _rtw_spinlock(). */
void	_rtw_spinunlock(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_unlock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_unlock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisReleaseSpinLock(plock);

#endif
}
1682 
1683 
/* Acquire the spinlock from dispatch/raised IRQL context (Windows uses the
 * Dpr variant; on Linux/FreeBSD identical to _rtw_spinlock()). */
void	_rtw_spinlock_ex(_lock	*plock)
{

#ifdef PLATFORM_LINUX

	spin_lock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_lock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisDprAcquireSpinLock(plock);

#endif

}
1702 
/* Release a spinlock taken with _rtw_spinlock_ex(). */
void	_rtw_spinunlock_ex(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_unlock(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_unlock(plock);
#endif
#ifdef PLATFORM_WINDOWS

	NdisDprReleaseSpinLock(plock);

#endif
}
1720 
1721 
1722 
/* Initialize a queue: empty list plus its protecting spinlock. */
void _rtw_init_queue(_queue *pqueue)
{
	_rtw_init_listhead(&(pqueue->queue));
	_rtw_spinlock_init(&(pqueue->lock));
}
1728 
/* Tear down a queue; only the lock needs freeing, entries are the
 * caller's responsibility. */
void _rtw_deinit_queue(_queue *pqueue)
{
	_rtw_spinlock_free(&(pqueue->lock));
}
1733 
/* Return _TRUE when the queue holds no entries (caller handles locking). */
u32	  _rtw_queue_empty(_queue	*pqueue)
{
	return rtw_is_list_empty(&(pqueue->queue));
}
1738 
1739 
rtw_end_of_queue_search(_list * head,_list * plist)1740 u32 rtw_end_of_queue_search(_list *head, _list *plist)
1741 {
1742 	if (head == plist)
1743 		return _TRUE;
1744 	else
1745 		return _FALSE;
1746 }
1747 
1748 
/* Current monotonic tick: jiffies on Linux, seconds on FreeBSD, 100 ns
 * units on Windows — only compare/convert via the _rtw_systime helpers. */
systime _rtw_get_current_time(void)
{

#ifdef PLATFORM_LINUX
	return jiffies;
#endif
#ifdef PLATFORM_FREEBSD
	struct timeval tvp;
	getmicrotime(&tvp);
	return tvp.tv_sec;
#endif
#ifdef PLATFORM_WINDOWS
	LARGE_INTEGER	SystemTime;
	NdisGetCurrentSystemTime(&SystemTime);
	return SystemTime.LowPart;/* count of 100-nanosecond intervals */
#endif
}
1766 
/* Convert a systime tick count to milliseconds. */
inline u32 _rtw_systime_to_ms(systime stime)
{
#ifdef PLATFORM_LINUX
	return jiffies_to_msecs(stime);
#endif
#ifdef PLATFORM_FREEBSD
	return stime * 1000;
#endif
#ifdef PLATFORM_WINDOWS
	return stime / 10000 ;
#endif
}
1779 
/* Convert milliseconds to systime ticks (inverse of _rtw_systime_to_ms). */
inline systime _rtw_ms_to_systime(u32 ms)
{
#ifdef PLATFORM_LINUX
	return msecs_to_jiffies(ms);
#endif
#ifdef PLATFORM_FREEBSD
	return ms / 1000;
#endif
#ifdef PLATFORM_WINDOWS
	return ms * 10000 ;
#endif
}
1792 
/* Convert microseconds to systime ticks (Linux only). */
inline systime _rtw_us_to_systime(u32 us)
{
#ifdef PLATFORM_LINUX
	return usecs_to_jiffies(us);
#else
	#error "TBD\n"
#endif
}
1801 
/* the input parameter start use the same unit as returned by rtw_get_current_time */
/* Milliseconds elapsed since 'start'. */
inline s32 _rtw_get_passing_time_ms(systime start)
{
	return _rtw_systime_to_ms(_rtw_get_current_time() - start);
}
1807 
/* Milliseconds remaining until deadline 'end' (same unit as
 * rtw_get_current_time()). */
inline s32 _rtw_get_remaining_time_ms(systime end)
{
	return _rtw_systime_to_ms(end - _rtw_get_current_time());
}
1812 
/* Milliseconds between two systime stamps (end - start). */
inline s32 _rtw_get_time_interval_ms(systime start, systime end)
{
	return _rtw_systime_to_ms(end - start);
}
1817 
/* Wrap-safe "a is later than b" comparison (Linux time_after()). */
inline bool _rtw_time_after(systime a, systime b)
{
#ifdef PLATFORM_LINUX
	return time_after(a, b);
#else
	#error "TBD\n"
#endif
}
1826 
/* Current CLOCK_MONOTONIC time as a ktime value; kernels >= 3.17 use the
 * 64-bit timespec API to stay Y2038-safe. */
sysptime rtw_sptime_get(void)
{
	/* CLOCK_MONOTONIC */
#ifdef PLATFORM_LINUX
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
	struct timespec64 cur;

	ktime_get_ts64(&cur);
	return timespec64_to_ktime(cur);
	#else
	struct timespec cur;

	ktime_get_ts(&cur);
	return timespec_to_ktime(cur);
	#endif
#else
	#error "TBD\n"
#endif
}
1846 
/* Build a sysptime from seconds + nanoseconds. */
sysptime rtw_sptime_set(s64 secs, const u32 nsecs)
{
#ifdef PLATFORM_LINUX
	return ktime_set(secs, nsecs);
#else
	#error "TBD\n"
#endif
}
1855 
/* The zero sysptime value. */
sysptime rtw_sptime_zero(void)
{
#ifdef PLATFORM_LINUX
	return ktime_set(0, 0);
#else
	#error "TBD\n"
#endif
}
1864 
1865 /*
1866  *   cmp1  < cmp2: return <0
1867  *   cmp1 == cmp2: return 0
1868  *   cmp1  > cmp2: return >0
1869  */
rtw_sptime_cmp(const sysptime cmp1,const sysptime cmp2)1870 int rtw_sptime_cmp(const sysptime cmp1, const sysptime cmp2)
1871 {
1872 #ifdef PLATFORM_LINUX
1873 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
1874 	return ktime_compare(cmp1, cmp2);
1875 	#else
1876 	if (cmp1.tv64 < cmp2.tv64)
1877 		return -1;
1878 	if (cmp1.tv64 > cmp2.tv64)
1879 		return 1;
1880 	return 0;
1881 	#endif
1882 #else
1883 	#error "TBD\n"
1884 #endif
1885 }
1886 
/* True when the two sysptime values are equal. */
bool rtw_sptime_eql(const sysptime cmp1, const sysptime cmp2)
{
#ifdef PLATFORM_LINUX
	return rtw_sptime_cmp(cmp1, cmp2) == 0;
#else
	#error "TBD\n"
#endif
}
1895 
/* True when sptime equals the zero sysptime. */
bool rtw_sptime_is_zero(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
	return rtw_sptime_cmp(sptime, rtw_sptime_zero()) == 0;
#else
	#error "TBD\n"
#endif
}
1904 
1905 /*
1906  * sub = lhs - rhs, in normalized form
1907  */
rtw_sptime_sub(const sysptime lhs,const sysptime rhs)1908 sysptime rtw_sptime_sub(const sysptime lhs, const sysptime rhs)
1909 {
1910 #ifdef PLATFORM_LINUX
1911 	return ktime_sub(lhs, rhs);
1912 #else
1913 	#error "TBD\n"
1914 #endif
1915 }
1916 
1917 /*
1918  * add = lhs + rhs, in normalized form
1919  */
rtw_sptime_add(const sysptime lhs,const sysptime rhs)1920 sysptime rtw_sptime_add(const sysptime lhs, const sysptime rhs)
1921 {
1922 #ifdef PLATFORM_LINUX
1923 	return ktime_add(lhs, rhs);
1924 #else
1925 	#error "TBD\n"
1926 #endif
1927 }
1928 
/* Convert a sysptime to whole milliseconds; pre-2.6.35 kernels go through
 * struct timeval since ktime_to_ms() did not exist yet. */
s64 rtw_sptime_to_ms(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return ktime_to_ms(sptime);
	#else
	struct timeval tv = ktime_to_timeval(sptime);

	return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
	#endif
#else
	#error "TBD\n"
#endif
}
1943 
/* Convert milliseconds to a sysptime. */
sysptime rtw_ms_to_sptime(u64 ms)
{
#ifdef PLATFORM_LINUX
	return ns_to_ktime(ms * NSEC_PER_MSEC);
#else
	#error "TBD\n"
#endif
}
1952 
/* Convert a sysptime to whole microseconds (timeval fallback pre-2.6.22). */
s64 rtw_sptime_to_us(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	return ktime_to_us(sptime);
	#else
	struct timeval tv = ktime_to_timeval(sptime);

	return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
	#endif
#else
	#error "TBD\n"
#endif
}
1967 
/* Convert microseconds to a sysptime. */
sysptime rtw_us_to_sptime(u64 us)
{
#ifdef PLATFORM_LINUX
	return ns_to_ktime(us * NSEC_PER_USEC);
#else
	#error "TBD\n"
#endif
}
1976 
/* Convert a sysptime to nanoseconds. */
s64 rtw_sptime_to_ns(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
	return ktime_to_ns(sptime);
#else
	#error "TBD\n"
#endif
}
1985 
/* Convert nanoseconds to a sysptime. */
sysptime rtw_ns_to_sptime(u64 ns)
{
#ifdef PLATFORM_LINUX
	return ns_to_ktime(ns);
#else
	#error "TBD\n"
#endif
}
1994 
rtw_sptime_diff_ms(const sysptime start,const sysptime end)1995 s64 rtw_sptime_diff_ms(const sysptime start, const sysptime end)
1996 {
1997 	sysptime diff;
1998 
1999 	diff = rtw_sptime_sub(end, start);
2000 
2001 	return rtw_sptime_to_ms(diff);
2002 }
2003 
rtw_sptime_pass_ms(const sysptime start)2004 s64 rtw_sptime_pass_ms(const sysptime start)
2005 {
2006 	sysptime cur, diff;
2007 
2008 	cur = rtw_sptime_get();
2009 	diff = rtw_sptime_sub(cur, start);
2010 
2011 	return rtw_sptime_to_ms(diff);
2012 }
2013 
rtw_sptime_diff_us(const sysptime start,const sysptime end)2014 s64 rtw_sptime_diff_us(const sysptime start, const sysptime end)
2015 {
2016 	sysptime diff;
2017 
2018 	diff = rtw_sptime_sub(end, start);
2019 
2020 	return rtw_sptime_to_us(diff);
2021 }
2022 
rtw_sptime_pass_us(const sysptime start)2023 s64 rtw_sptime_pass_us(const sysptime start)
2024 {
2025 	sysptime cur, diff;
2026 
2027 	cur = rtw_sptime_get();
2028 	diff = rtw_sptime_sub(cur, start);
2029 
2030 	return rtw_sptime_to_us(diff);
2031 }
2032 
rtw_sptime_diff_ns(const sysptime start,const sysptime end)2033 s64 rtw_sptime_diff_ns(const sysptime start, const sysptime end)
2034 {
2035 	sysptime diff;
2036 
2037 	diff = rtw_sptime_sub(end, start);
2038 
2039 	return rtw_sptime_to_ns(diff);
2040 }
2041 
rtw_sptime_pass_ns(const sysptime start)2042 s64 rtw_sptime_pass_ns(const sysptime start)
2043 {
2044 	sysptime cur, diff;
2045 
2046 	cur = rtw_sptime_get();
2047 	diff = rtw_sptime_sub(cur, start);
2048 
2049 	return rtw_sptime_to_ns(diff);
2050 }
2051 
/* Sleep for at least 'ms' milliseconds while allowing the scheduler to run
 * other tasks (interruptible schedule_timeout on Linux). */
void rtw_sleep_schedulable(int ms)
{

#ifdef PLATFORM_LINUX

	u32 delta = (ms * HZ) / 1000; /* ms -> jiffies */

	if (delta == 0)
		delta = 1; /* round up to at least one jiffy */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(delta);
	return;

#endif
#ifdef PLATFORM_FREEBSD
	DELAY(ms * 1000);
	return ;
#endif

#ifdef PLATFORM_WINDOWS

	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */

#endif

}
2080 
2081 
/* Sleep for 'ms' milliseconds.  On kernels >= 2.6.36, short sleeps
 * (< 20 ms) use usleep_range() for better timer precision, as msleep()
 * can overshoot significantly for small values. */
void rtw_msleep_os(int ms)
{

#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	if (ms < 20) {
		unsigned long us = ms * 1000UL;
		usleep_range(us, us + 1000UL);
	} else
#endif
		msleep((unsigned int)ms);

#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/* Sleep for approximately 'us' microseconds (may schedule; not for atomic
 * context — use rtw_udelay_os() there). */
void rtw_usleep_os(int us)
{
#ifdef PLATFORM_LINUX

	/* msleep((unsigned int)us); */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	usleep_range(us, us + 1);
#else
	/* Old-kernel fallback: round the request up to whole milliseconds.
	 * Fix: the original branches were swapped and slept only 1 ms for
	 * any request above 2000 us, under-sleeping long waits. */
	if (1 < (us / 1000))
		msleep(us / 1000);
	else
		msleep((us / 1000) + 1);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(us);

	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisMSleep(us); /* (us) */

#endif


}
2137 
2138 
2139 #ifdef DBG_DELAY_OS
/* DBG_DELAY_OS variant of rtw_mdelay_os(): busy-wait 'ms' milliseconds and
 * log every call site so long busy-waits can be spotted. */
void _rtw_mdelay_os(int ms, const char *func, const int line)
{
#if 0
	if (ms > 10)
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
	rtw_msleep_os(ms);
	return;
#endif


	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);

#if defined(PLATFORM_LINUX)

	mdelay((unsigned long)ms);

#elif defined(PLATFORM_WINDOWS)

	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/* DBG_DELAY_OS variant of rtw_udelay_os(): busy-wait 'us' microseconds and
 * log every call site. */
void _rtw_udelay_os(int us, const char *func, const int line)
{

#if 0
	if (us > 1000) {
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
		rtw_usleep_os(us);
		return;
	}
#endif


	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);


#if defined(PLATFORM_LINUX)

	udelay((unsigned long)us);

#elif defined(PLATFORM_WINDOWS)

	NdisStallExecution(us); /* (us) */

#endif

}
2190 #else
/* Busy-wait for 'ms' milliseconds; safe in atomic context (no scheduling). */
void rtw_mdelay_os(int ms)
{

#ifdef PLATFORM_LINUX

	mdelay((unsigned long)ms);

#endif
#ifdef PLATFORM_FREEBSD
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/* Busy-wait for 'us' microseconds; safe in atomic context (no scheduling). */
void rtw_udelay_os(int us)
{

#ifdef PLATFORM_LINUX

	udelay((unsigned long)us);

#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(us);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisStallExecution(us); /* (us) */

#endif

}
2231 #endif
2232 
/* Voluntarily yield the CPU to other runnable tasks. */
void rtw_yield_os(void)
{
#ifdef PLATFORM_LINUX
	yield();
#endif
#ifdef PLATFORM_FREEBSD
	yield();
#endif
#ifdef PLATFORM_WINDOWS
	SwitchToThread();
#endif
}
2245 
/* Human-readable tags for enum rtw_pwait_type, indexed by type;
 * RTW_PWAIT_TYPE_NUM doubles as the out-of-range label. */
const char *_rtw_pwait_type_str[] = {
	[RTW_PWAIT_TYPE_MSLEEP] = "MS",
	[RTW_PWAIT_TYPE_USLEEP] = "US",
	[RTW_PWAIT_TYPE_YIELD] = "Y",
	[RTW_PWAIT_TYPE_MDELAY] = "MD",
	[RTW_PWAIT_TYPE_UDELAY] = "UD",
	[RTW_PWAIT_TYPE_NUM] = "unknown",
};
2254 
/* Adapter so yield fits the common (int) wait-handler signature used by
 * rtw_pwait_hdl[]; the 'us' argument is intentionally ignored. */
static void rtw_pwctx_yield(int us)
{
	rtw_yield_os();
}
2259 
/* Wait-handler dispatch table, indexed by enum rtw_pwait_type; each entry
 * takes the wait amount (ms or us depending on the type). */
static void (*const rtw_pwait_hdl[])(int)= {
	[RTW_PWAIT_TYPE_MSLEEP] = rtw_msleep_os,
	[RTW_PWAIT_TYPE_USLEEP] = rtw_usleep_os,
	[RTW_PWAIT_TYPE_YIELD] = rtw_pwctx_yield,
	[RTW_PWAIT_TYPE_MDELAY] = rtw_mdelay_os,
	[RTW_PWAIT_TYPE_UDELAY] = rtw_udelay_os,
};
2267 
/* Configure a polling-wait context: wait type, per-iteration wait amount
 * and iteration limit.  Returns _FAIL for an invalid type (context left
 * unmodified), _SUCCESS otherwise. */
int rtw_pwctx_config(struct rtw_pwait_ctx *pwctx, enum rtw_pwait_type type, s32 time, s32 cnt_lmt)
{
	if (!RTW_PWAIT_TYPE_VALID(type))
		return _FAIL;

	pwctx->conf.type = type;
	pwctx->conf.wait_time = time;
	pwctx->conf.wait_cnt_lmt = cnt_lmt;
	pwctx->wait_hdl = rtw_pwait_hdl[type];

	return _SUCCESS;
}
2285 
rtw_macaddr_is_larger(const u8 * a,const u8 * b)2286 bool rtw_macaddr_is_larger(const u8 *a, const u8 *b)
2287 {
2288 	u32 va, vb;
2289 
2290 	va = be32_to_cpu(*((u32 *)a));
2291 	vb = be32_to_cpu(*((u32 *)b));
2292 	if (va > vb)
2293 		return 1;
2294 	else if (va < vb)
2295 		return 0;
2296 
2297 	return be16_to_cpu(*((u16 *)(a + 4))) > be16_to_cpu(*((u16 *)(b + 4)));
2298 }
2299 
/* System-suspend wake locks: a general lock, one held during bursts of
 * traffic, and one held across resume (Android wakelock or android_power
 * backends; no-ops when neither is configured). */
#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
#define RTW_SUSPEND_TRAFFIC_LOCK_NAME "rtw_wifi_traffic"
#define RTW_SUSPEND_RESUME_LOCK_NAME "rtw_wifi_resume"
#ifdef CONFIG_WAKELOCK
static struct wake_lock rtw_suspend_lock;
static struct wake_lock rtw_suspend_traffic_lock;
static struct wake_lock rtw_suspend_resume_lock;
#elif defined(CONFIG_ANDROID_POWER)
static android_suspend_lock_t rtw_suspend_lock = {
	.name = RTW_SUSPEND_LOCK_NAME
};
static android_suspend_lock_t rtw_suspend_traffic_lock = {
	.name = RTW_SUSPEND_TRAFFIC_LOCK_NAME
};
static android_suspend_lock_t rtw_suspend_resume_lock = {
	.name = RTW_SUSPEND_RESUME_LOCK_NAME
};
#endif
2318 
/* Register the three driver suspend wake locks with the platform. */
inline void rtw_suspend_lock_init(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME);
	wake_lock_init(&rtw_suspend_traffic_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_TRAFFIC_LOCK_NAME);
	wake_lock_init(&rtw_suspend_resume_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RESUME_LOCK_NAME);
#elif defined(CONFIG_ANDROID_POWER)
	android_init_suspend_lock(&rtw_suspend_lock);
	android_init_suspend_lock(&rtw_suspend_traffic_lock);
	android_init_suspend_lock(&rtw_suspend_resume_lock);
#endif
}
2331 
/* Unregister the suspend wake locks registered by rtw_suspend_lock_init(). */
inline void rtw_suspend_lock_uninit(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_destroy(&rtw_suspend_lock);
	wake_lock_destroy(&rtw_suspend_traffic_lock);
	wake_lock_destroy(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_uninit_suspend_lock(&rtw_suspend_lock);
	android_uninit_suspend_lock(&rtw_suspend_traffic_lock);
	android_uninit_suspend_lock(&rtw_suspend_resume_lock);
#endif
}
2344 
/* Hold the general wake lock, preventing system suspend until
 * rtw_unlock_suspend() is called. */
inline void rtw_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2357 
/* Release the general wake lock taken by rtw_lock_suspend(). */
inline void rtw_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2370 
/* Hold the resume wake lock, keeping the system awake across the driver's
 * resume processing. */
inline void rtw_resume_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_resume_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2383 
/* Release the resume wake lock taken by rtw_resume_lock_suspend(). */
inline void rtw_resume_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_resume_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2396 
/* Hold the general wake lock and let it auto-expire after timeout_ms. */
inline void rtw_lock_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#endif
}
2405 
2406 
/* Hold the traffic wake lock for at most @timeout_ms (auto-expiring);
 * used to delay suspend while traffic is active. */
inline void rtw_lock_traffic_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#endif
	/* RTW_INFO("traffic lock timeout:%d\n", timeout_ms); */
}
2416 
/* Atomically set bit @nr in the bitmap at @addr (Linux set_bit wrapper). */
inline void rtw_set_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	set_bit(nr, addr);
#else
	#error "TBD\n";
#endif
}
2425 
/* Atomically clear bit @nr in the bitmap at @addr (Linux clear_bit wrapper). */
inline void rtw_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	clear_bit(nr, addr);
#else
	#error "TBD\n";
#endif
}
2434 
/* Atomically clear bit @nr at @addr and return its previous value. */
inline int rtw_test_and_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	return test_and_clear_bit(nr, addr);
#else
	#error "TBD\n";
#endif
}
2443 
/* Platform-independent atomic store: set *v to @i. */
inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_set(v, i);
#elif defined(PLATFORM_WINDOWS)
	*v = i; /* plain store; atomicity not guaranteed here */
#elif defined(PLATFORM_FREEBSD)
	atomic_set_int(v, i);
#endif
}
2454 
/* Platform-independent atomic load: return the current value of *v. */
inline int ATOMIC_READ(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_read(v);
#elif defined(PLATFORM_WINDOWS)
	return *v; /* plain load; atomicity not guaranteed here */
#elif defined(PLATFORM_FREEBSD)
	return atomic_load_acq_32(v);
#endif
}
2465 
/* Atomically add @i to *v. */
inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_add(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, i);
#endif
}
/* Atomically subtract @i from *v. */
inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_sub(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, i);
#endif
}
2486 
/* Atomically increment *v by one. */
inline void ATOMIC_INC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_inc(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, 1);
#endif
}
2497 
/* Atomically decrement *v by one. */
inline void ATOMIC_DEC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_dec(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, 1);
#endif
}
2508 
/* Atomically add @i to *v and return the new value.
 * NOTE(review): the FreeBSD fallback is an add followed by a separate
 * load, so the returned value is not guaranteed to be the one this add
 * produced under contention — confirm whether that matters to callers. */
inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_add_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, i);
	return atomic_load_acq_32(v);
#endif
}
2520 
/* Atomically subtract @i from *v and return the new value.
 * NOTE(review): FreeBSD fallback is sub-then-load, not a single RMW. */
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_sub_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, i);
	return atomic_load_acq_32(v);
#endif
}
2532 
/* Atomically increment *v and return the new value.
 * NOTE(review): FreeBSD fallback is inc-then-load, not a single RMW. */
inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_inc_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}
2544 
/* Atomically decrement *v and return the new value.
 * NOTE(review): FreeBSD fallback is dec-then-load, not a single RMW. */
inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_dec_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}
2556 
/* Increment *v unless doing so would pass the limit @u.
 * Returns non-zero if the increment took place. */
inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
	return atomic_add_unless(v, 1, u);
#else
	/* only make sure not exceed after this function */
	/* NOTE(review): unlike atomic_add_unless(), this fallback can
	 * transiently exceed @u before the compensating decrement. */
	if (ATOMIC_INC_RETURN(v) > u) {
		ATOMIC_DEC(v);
		return 0;
	}
	return 1;
#endif
#else
	#error "TBD\n"
#endif
}
2574 
2575 #ifdef PLATFORM_LINUX
2576 #if !defined(CONFIG_RTW_ANDROID_GKI)
2577 /*
2578 * Open a file with the specific @param path, @param flag, @param mode
2579 * @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
2580 * @param path the path of the file to open
2581 * @param flag file operation flags, please refer to linux document
2582 * @param mode please refer to linux document
2583 * @return Linux specific error code
2584 */
openFile(struct file ** fpp,const char * path,int flag,int mode)2585 static int openFile(struct file **fpp, const char *path, int flag, int mode)
2586 {
2587 	struct file *fp;
2588 
2589 	fp = filp_open(path, flag, mode);
2590 	if (IS_ERR(fp)) {
2591 		*fpp = NULL;
2592 		return PTR_ERR(fp);
2593 	} else {
2594 		*fpp = fp;
2595 		return 0;
2596 	}
2597 }
2598 
2599 /*
2600 * Close the file with the specific @param fp
2601 * @param fp the pointer of struct file to close
2602 * @return always 0
2603 */
static int closeFile(struct file *fp)
{
	/* the result of filp_close() is intentionally ignored */
	filp_close(fp, NULL);
	return 0;
}
2609 
/*
 * Read up to @len bytes from @fp into @buf, looping until @len bytes are
 * read, EOF is hit, or an error occurs.
 * Returns the number of bytes read, or a negative Linux error code.
 */
static int readFile(struct file *fp, char *buf, int len)
{
	int rlen = 0, sum = 0;

	/* reject files that cannot be read on this kernel version */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_READ))
#else
	if (!fp->f_op || !fp->f_op->read)
#endif
		return -EPERM;

	while (sum < len) {
		/* pick the kernel-version-appropriate read primitive */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		rlen = __vfs_read(fp, buf + sum, len - sum, &fp->f_pos);
#else
		rlen = fp->f_op->read(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (rlen > 0)
			sum += rlen;
		else if (0 != rlen)
			return rlen; /* propagate the error code */
		else
			break; /* EOF */
	}

	return  sum;

}
2640 
/*
 * Write up to @len bytes from @buf to @fp, looping until everything is
 * written, the file stops accepting data, or an error occurs.
 * Returns the number of bytes written, or a negative Linux error code.
 */
static int writeFile(struct file *fp, char *buf, int len)
{
	int wlen = 0, sum = 0;

	/* reject files that cannot be written on this kernel version */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_WRITE))
#else
	if (!fp->f_op || !fp->f_op->write)
#endif
		return -EPERM;

	while (sum < len) {
		/* pick the kernel-version-appropriate write primitive */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		wlen = kernel_write(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		wlen = __vfs_write(fp, buf + sum, len - sum, &fp->f_pos);
#else
		wlen = fp->f_op->write(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (wlen > 0)
			sum += wlen;
		else if (0 != wlen)
			return wlen; /* propagate the error code */
		else
			break;
	}

	return sum;

}
2671 
2672 /*
2673 * Test if the specifi @param pathname is a direct and readable
2674 * If readable, @param sz is not used
2675 * @param pathname the name of the path to test
2676 * @return Linux specific error code
2677 */
static int isDirReadable(const char *pathname, u32 *sz)
{
	struct path path;
	int ret;

	/* Fix: kern_path() takes a reference on the resolved path on
	 * success; release it with path_put() instead of leaking the
	 * dentry/vfsmount. Also drop the unused local 'error'.
	 * @sz is unused, kept for interface symmetry with isFileReadable(). */
	ret = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (ret == 0)
		path_put(&path);

	return ret;
}
2685 
2686 /*
2687 * Test if the specifi @param path is a file and readable
2688 * If readable, @param sz is got
2689 * @param path the path of the file to test
2690 * @return Linux specific error code
2691 */
isFileReadable(const char * path,u32 * sz)2692 static int isFileReadable(const char *path, u32 *sz)
2693 {
2694 	struct file *fp;
2695 	int ret = 0;
2696 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2697 	mm_segment_t oldfs;
2698 	#endif
2699 	char buf;
2700 
2701 	fp = filp_open(path, O_RDONLY, 0);
2702 	if (IS_ERR(fp))
2703 		ret = PTR_ERR(fp);
2704 	else {
2705 		#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2706 		oldfs = get_fs();
2707 		#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2708 		set_fs(KERNEL_DS);
2709 		#else
2710 		set_fs(get_ds());
2711 		#endif
2712 		#endif
2713 
2714 		if (1 != readFile(fp, &buf, 1))
2715 			ret = PTR_ERR(fp);
2716 
2717 		if (ret == 0 && sz) {
2718 			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
2719 			*sz = i_size_read(fp->f_path.dentry->d_inode);
2720 			#else
2721 			*sz = i_size_read(fp->f_dentry->d_inode);
2722 			#endif
2723 		}
2724 
2725 		#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2726 		set_fs(oldfs);
2727 		#endif
2728 		filp_close(fp, NULL);
2729 	}
2730 	return ret;
2731 }
2732 
2733 /*
2734 * Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file
2735 * @param path the path of the file to open and write
2736 * @param buf the starting address of the data to write into file
2737 * @param sz how many bytes to write at most
2738 * @return the byte we've written, or Linux specific error code
2739 */
static int storeToFile(const char *path, u8 *buf, u32 sz)
{
	int ret = 0;
	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t oldfs;
	#endif
	struct file *fp;

	if (path && buf) {
		/* create the file if needed; 0666 then masked by umask */
		ret = openFile(&fp, path, O_CREAT | O_WRONLY, 0666);
		if (0 == ret) {
			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);

			/* pre-5.10 kernels need the address limit widened
			 * before passing kernel buffers into VFS writes */
			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			oldfs = get_fs();
			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
			set_fs(KERNEL_DS);
			#else
			set_fs(get_ds());
			#endif
			#endif

			ret = writeFile(fp, buf, sz);

			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			set_fs(oldfs);
			#endif
			closeFile(fp);

			RTW_INFO("%s writeFile, ret:%d\n", __FUNCTION__, ret);

		} else
			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
	} else {
		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
		ret =  -EINVAL;
	}
	return ret;
}
2779 #endif /* !defined(CONFIG_RTW_ANDROID_GKI)*/
2780 #endif /* PLATFORM_LINUX */
2781 
2782 #if !defined(CONFIG_RTW_ANDROID_GKI)
2783 /*
2784 * Test if the specifi @param path is a direct and readable
2785 * @param path the path of the direct to test
2786 * @return _TRUE or _FALSE
2787 */
rtw_is_dir_readable(const char * path)2788 int rtw_is_dir_readable(const char *path)
2789 {
2790 #ifdef PLATFORM_LINUX
2791 	if (isDirReadable(path, NULL) == 0)
2792 		return _TRUE;
2793 	else
2794 		return _FALSE;
2795 #else
2796 	/* Todo... */
2797 	return _FALSE;
2798 #endif
2799 }
2800 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
2801 
2802 /*
2803 * Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
2804 * @param path the path of the file to open and read
2805 * @param buf the starting address of the buffer to store file content
2806 * @param sz how many bytes to read at most
2807 * @return the byte we've read, or Linux specific error code
2808 */
retriveFromFile(const char * path,u8 * buf,u32 sz)2809 static int retriveFromFile(const char *path, u8 *buf, u32 sz)
2810 {
2811 #if defined(CONFIG_RTW_ANDROID_GKI)
2812 	int ret = -EINVAL;
2813 	const struct firmware *fw = NULL;
2814 	char* const delim = "/";
2815 	char *name, *token, *cur, *path_tmp = NULL;
2816 
2817 
2818 	if (path == NULL || buf == NULL) {
2819 		RTW_ERR("%s() NULL pointer\n", __func__);
2820 		goto err;
2821 	}
2822 
2823 	path_tmp = kstrdup(path, GFP_KERNEL);
2824 	if (path_tmp == NULL) {
2825 		RTW_ERR("%s() cannot copy path for parsing file name\n", __func__);
2826 		goto err;
2827 	}
2828 
2829 	/* parsing file name from path */
2830 	cur = path_tmp;
2831 	token = strsep(&cur, delim);
2832 	while (token != NULL) {
2833 		token = strsep(&cur, delim);
2834 		if(token)
2835 			name = token;
2836 	}
2837 
2838 	if (name == NULL) {
2839 		RTW_ERR("%s() parsing file name fail\n", __func__);
2840 		goto err;
2841 	}
2842 
2843 	/* request_firmware() will find file in /vendor/firmware but not in path */
2844 	ret = request_firmware(&fw, name, NULL);
2845 	if (ret == 0) {
2846 		RTW_INFO("%s() Success. retrieve file : %s, file size : %zu\n", __func__, name, fw->size);
2847 
2848 		if ((u32)fw->size < sz) {
2849 			_rtw_memcpy(buf, fw->data, (u32)fw->size);
2850 			ret = (u32)fw->size;
2851 			goto exit;
2852 		} else {
2853 			RTW_ERR("%s() file size : %zu exceed buf size : %u\n", __func__, fw->size, sz);
2854 			ret = -EFBIG;
2855 			goto err;
2856 		}
2857 	} else {
2858 		RTW_ERR("%s() Fail. retrieve file : %s, error : %d\n", __func__, name, ret);
2859 		goto err;
2860 	}
2861 
2862 
2863 
2864 err:
2865 	RTW_ERR("%s() Fail. retrieve file : %s, error : %d\n", __func__, path, ret);
2866 exit:
2867 	if (path_tmp)
2868 		kfree(path_tmp);
2869 	if (fw)
2870 		release_firmware(fw);
2871 	return ret;
2872 #else /* !defined(CONFIG_RTW_ANDROID_GKI) */
2873 	int ret = -1;
2874 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2875 	mm_segment_t oldfs;
2876 	#endif
2877 	struct file *fp;
2878 
2879 	if (path && buf) {
2880 		ret = openFile(&fp, path, O_RDONLY, 0);
2881 		if (0 == ret) {
2882 			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);
2883 
2884 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2885 			oldfs = get_fs();
2886 			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2887 			set_fs(KERNEL_DS);
2888 			#else
2889 			set_fs(get_ds());
2890 			#endif
2891 			#endif
2892 
2893 			ret = readFile(fp, buf, sz);
2894 
2895 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2896 			set_fs(oldfs);
2897 			#endif
2898 			closeFile(fp);
2899 
2900 			RTW_INFO("%s readFile, ret:%d\n", __FUNCTION__, ret);
2901 
2902 		} else
2903 			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
2904 	} else {
2905 		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
2906 		ret =  -EINVAL;
2907 	}
2908 	return ret;
2909 #endif /* defined(CONFIG_RTW_ANDROID_GKI) */
2910 }
2911 
2912 /*
2913 * Test if the specifi @param path is a file and readable
2914 * @param path the path of the file to test
2915 * @return _TRUE or _FALSE
2916 */
int rtw_is_file_readable(const char *path)
{
#ifdef PLATFORM_LINUX
#if !defined(CONFIG_RTW_ANDROID_GKI)
	if (isFileReadable(path, NULL) == 0)
		return _TRUE;
	else
		return _FALSE;
#else
	/* GKI builds cannot read files from kernel context; optimistically
	 * report readable and let the firmware loader decide later */
	RTW_INFO("%s() Android GKI prohibbit kernel_read, return _TRUE\n", __func__);
	return  _TRUE;
#endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
#else
	/* Todo... */
	return _FALSE;
#endif
}
2934 
2935 /*
2936 * Test if the specifi @param path is a file and readable.
2937 * If readable, @param sz is got
2938 * @param path the path of the file to test
2939 * @return _TRUE or _FALSE
2940 */
int rtw_is_file_readable_with_size(const char *path, u32 *sz)
{
#ifdef PLATFORM_LINUX
#if !defined(CONFIG_RTW_ANDROID_GKI)
	if (isFileReadable(path, sz) == 0)
		return _TRUE;
	else
		return _FALSE;
#else
	/* GKI builds cannot read files from kernel context: report
	 * readable with an unknown (zero) size */
	RTW_INFO("%s() Android GKI prohibbit kernel_read, return _TRUE\n", __func__);
	*sz = 0;
	return  _TRUE;
#endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
#else
	/* Todo... */
	return _FALSE;
#endif
}
2959 
2960 /*
2961 * Test if the specifi @param path is a readable file with valid size.
2962 * If readable, @param sz is got
2963 * @param path the path of the file to test
2964 * @return _TRUE or _FALSE
2965 */
int rtw_readable_file_sz_chk(const char *path, u32 sz)
{
	u32 actual_sz;

	/* unreadable file: fail immediately */
	if (rtw_is_file_readable_with_size(path, &actual_sz) == _FALSE)
		return _FALSE;

	/* readable and no larger than the caller's limit */
	return (actual_sz <= sz) ? _TRUE : _FALSE;
}
2978 
2979 /*
2980 * Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
2981 * @param path the path of the file to open and read
2982 * @param buf the starting address of the buffer to store file content
2983 * @param sz how many bytes to read at most
2984 * @return the byte we've read
2985 */
int rtw_retrieve_from_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int nread = retriveFromFile(path, buf, sz);

	/* error codes are reported to callers as "0 bytes read" */
	return (nread < 0) ? 0 : nread;
#else
	/* Todo... */
	return 0;
#endif
}
2996 
2997 #if !defined(CONFIG_RTW_ANDROID_GKI)
2998 /*
2999 * Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file
3000 * @param path the path of the file to open and write
3001 * @param buf the starting address of the data to write into file
3002 * @param sz how many bytes to write at most
3003 * @return the byte we've written
3004 */
int rtw_store_to_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int nwritten = storeToFile(path, buf, sz);

	/* error codes are reported to callers as "0 bytes written" */
	return (nwritten < 0) ? 0 : nwritten;
#else
	/* Todo... */
	return 0;
#endif
}
3015 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
3016 
3017 #ifdef PLATFORM_LINUX
rtw_alloc_etherdev_with_old_priv(int sizeof_priv,void * old_priv)3018 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
3019 {
3020 	struct net_device *pnetdev;
3021 	struct rtw_netdev_priv_indicator *pnpi;
3022 
3023 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
3024 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
3025 #else
3026 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
3027 #endif
3028 	if (!pnetdev)
3029 		goto RETURN;
3030 
3031 	pnpi = netdev_priv(pnetdev);
3032 	pnpi->priv = old_priv;
3033 	pnpi->sizeof_priv = sizeof_priv;
3034 
3035 RETURN:
3036 	return pnetdev;
3037 }
3038 
rtw_alloc_etherdev(int sizeof_priv)3039 struct net_device *rtw_alloc_etherdev(int sizeof_priv)
3040 {
3041 	struct net_device *pnetdev;
3042 	struct rtw_netdev_priv_indicator *pnpi;
3043 
3044 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
3045 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
3046 #else
3047 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
3048 #endif
3049 	if (!pnetdev)
3050 		goto RETURN;
3051 
3052 	pnpi = netdev_priv(pnetdev);
3053 
3054 	pnpi->priv = rtw_zvmalloc(sizeof_priv);
3055 	if (!pnpi->priv) {
3056 		free_netdev(pnetdev);
3057 		pnetdev = NULL;
3058 		goto RETURN;
3059 	}
3060 
3061 	pnpi->sizeof_priv = sizeof_priv;
3062 RETURN:
3063 	return pnetdev;
3064 }
3065 
/*
 * Free a net_device allocated by rtw_alloc_etherdev*().
 * NOTE(review): when pnpi->priv is NULL the netdev is NOT freed and
 * appears to leak — confirm whether any caller depends on that before
 * changing it. The priv buffer itself is presumably released elsewhere;
 * verify against the callers.
 */
void rtw_free_netdev(struct net_device *netdev)
{
	struct rtw_netdev_priv_indicator *pnpi;

	if (!netdev)
		goto RETURN;

	pnpi = netdev_priv(netdev);

	if (!pnpi->priv)
		goto RETURN;

	free_netdev(netdev);

RETURN:
	return;
}
3083 
3084 #endif
3085 
3086 #ifdef PLATFORM_FREEBSD
3087 /*
3088  * Copy a buffer from userspace and write into kernel address
3089  * space.
3090  *
3091  * This emulation just calls the FreeBSD copyin function (to
3092  * copy data from user space buffer into a kernel space buffer)
3093  * and is designed to be used with the above io_write_wrapper.
3094  *
3095  * This function should return the number of bytes not copied.
3096  * I.e. success results in a zero value.
3097  * Negative error values are not returned.
3098  */
unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
	/* copyin() returns non-zero on any fault; per the Linux
	 * copy_from_user contract, report that as "n bytes not copied" */
	return (copyin(from, to, n) == 0) ? 0 : n;
}
3110 
unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
	/* copyout() returns non-zero on any fault; per the Linux
	 * copy_to_user contract, report that as "n bytes not copied" */
	return (copyout(from, to, n) == 0) ? 0 : n;
}
3122 
3123 
3124 /*
3125  * The usb_register and usb_deregister functions are used to register
3126  * usb drivers with the usb subsystem. In this compatibility layer
3127  * emulation a list of drivers (struct usb_driver) is maintained
3128  * and is used for probing/attaching etc.
3129  *
3130  * usb_register and usb_deregister simply call these functions.
3131  */
/* Register @driver with the FreeBSD USB compatibility layer; always 0. */
int
usb_register(struct usb_driver *driver)
{
	rtw_usb_linux_register(driver);
	return 0;
}
3138 
3139 
/* Remove @driver from the FreeBSD USB compatibility layer; always 0. */
int
usb_deregister(struct usb_driver *driver)
{
	rtw_usb_linux_deregister(driver);
	return 0;
}
3146 
/* Invoke a module init/exit routine passed as an opaque pointer;
 * the routine's return value is discarded. */
void module_init_exit_wrapper(void *arg)
{
	int (*entry)(void) = arg;

	(void)entry();
}
3153 
3154 #endif /* PLATFORM_FREEBSD */
3155 
3156 #ifdef CONFIG_PLATFORM_SPRD
3157 	#ifdef do_div
3158 		#undef do_div
3159 	#endif
3160 	#include <asm-generic/div64.h>
3161 #endif
3162 
/*
 * Return x % y across platforms.
 * NOTE(review): on Linux, do_div() takes a 32-bit divisor, so a @y wider
 * than 32 bits is truncated here — confirm callers only pass 32-bit
 * divisors. do_div() modifies its first argument, which is safe because
 * @x is passed by value.
 */
u64 rtw_modular64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	return do_div(x, y);
#elif defined(PLATFORM_WINDOWS)
	return x % y;
#elif defined(PLATFORM_FREEBSD)
	return x % y;
#endif
}
3173 
/*
 * Return x / y across platforms.
 * NOTE(review): on Linux, do_div() takes a 32-bit divisor — same
 * truncation caveat as rtw_modular64(). do_div() stores the quotient
 * back into @x, hence the "return x" after the call.
 */
u64 rtw_division64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	do_div(x, y);
	return x;
#elif defined(PLATFORM_WINDOWS)
	return x / y;
#elif defined(PLATFORM_FREEBSD)
	return x / y;
#endif
}
3185 
/* Return a pseudo-random 32-bit value using whichever PRNG interface the
 * running kernel version provides. */
inline u32 rtw_random32(void)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	return prandom_u32();
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18))
	/* very old kernels: draw 4 bytes from the entropy pool */
	u32 random_int;
	get_random_bytes(&random_int , 4);
	return random_int;
#else
	return random32();
#endif
#elif defined(PLATFORM_WINDOWS)
#error "to be implemented\n"
#elif defined(PLATFORM_FREEBSD)
#error "to be implemented\n"
#endif
}
3204 
/*
 * Free *buf (if any), resetting *buf to NULL and *buf_len to 0.
 * Safe to call with NULL @buf or @buf_len.
 */
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
	u32 len;

	if (!buf || !buf_len)
		return;

	if (*buf) {
		/* removed the unused 'ori_len' local; snapshot the length
		 * so it can be cleared before the free */
		len = *buf_len;
		*buf_len = 0;
		rtw_mfree(*buf, len);
		*buf = NULL;
	}
}
3221 
/*
 * Replace the buffer at *buf/*buf_len with a copy of @src (@src_len
 * bytes) and free the previous buffer. If @src is NULL/empty, or the
 * copy allocation fails, *buf becomes NULL and *buf_len becomes 0.
 */
void rtw_buf_update(u8 **buf, u32 *buf_len, const u8 *src, u32 src_len)
{
	u8 *old_buf, *new_buf = NULL;
	u32 old_len, new_len = 0;

	if (!buf || !buf_len)
		return;

	/* make a private copy of the source, when one is supplied */
	if (src && src_len) {
		new_buf = rtw_malloc(src_len);
		if (new_buf) {
			_rtw_memcpy(new_buf, src, src_len);
			new_len = src_len;
		}
	}

	old_buf = *buf;
	old_len = *buf_len;

	/* swap in the copy (length cleared first, as before) */
	*buf_len = 0;
	*buf = new_buf;
	*buf_len = new_len;

	/* release the previous buffer */
	if (old_buf && old_len > 0)
		rtw_mfree(old_buf, old_len);
}
3254 
3255 
3256 /**
3257  * rtw_cbuf_full - test if cbuf is full
3258  * @cbuf: pointer of struct rtw_cbuf
3259  *
3260  * Returns: _TRUE if cbuf is full
3261  */
rtw_cbuf_full(struct rtw_cbuf * cbuf)3262 inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
3263 {
3264 	return (cbuf->write == cbuf->read - 1) ? _TRUE : _FALSE;
3265 }
3266 
3267 /**
3268  * rtw_cbuf_empty - test if cbuf is empty
3269  * @cbuf: pointer of struct rtw_cbuf
3270  *
3271  * Returns: _TRUE if cbuf is empty
3272  */
rtw_cbuf_empty(struct rtw_cbuf * cbuf)3273 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
3274 {
3275 	return (cbuf->write == cbuf->read) ? _TRUE : _FALSE;
3276 }
3277 
3278 /**
3279  * rtw_cbuf_push - push a pointer into cbuf
3280  * @cbuf: pointer of struct rtw_cbuf
3281  * @buf: pointer to push in
3282  *
3283  * Lock free operation, be careful of the use scheme
3284  * Returns: _TRUE push success
3285  */
rtw_cbuf_push(struct rtw_cbuf * cbuf,void * buf)3286 bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
3287 {
3288 	if (rtw_cbuf_full(cbuf))
3289 		return _FAIL;
3290 
3291 	if (0)
3292 		RTW_INFO("%s on %u\n", __func__, cbuf->write);
3293 	cbuf->bufs[cbuf->write] = buf;
3294 	cbuf->write = (cbuf->write + 1) % cbuf->size;
3295 
3296 	return _SUCCESS;
3297 }
3298 
3299 /**
3300  * rtw_cbuf_pop - pop a pointer from cbuf
3301  * @cbuf: pointer of struct rtw_cbuf
3302  *
3303  * Lock free operation, be careful of the use scheme
3304  * Returns: pointer popped out
3305  */
rtw_cbuf_pop(struct rtw_cbuf * cbuf)3306 void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
3307 {
3308 	void *buf;
3309 	if (rtw_cbuf_empty(cbuf))
3310 		return NULL;
3311 
3312 	if (0)
3313 		RTW_INFO("%s on %u\n", __func__, cbuf->read);
3314 	buf = cbuf->bufs[cbuf->read];
3315 	cbuf->read = (cbuf->read + 1) % cbuf->size;
3316 
3317 	return buf;
3318 }
3319 
3320 /**
3321  * rtw_cbuf_alloc - allocte a rtw_cbuf with given size and do initialization
3322  * @size: size of pointer
3323  *
3324  * Returns: pointer of srtuct rtw_cbuf, NULL for allocation failure
3325  */
struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
	/* header plus one pointer slot per element */
	u32 alloc_sz = sizeof(struct rtw_cbuf) + sizeof(void *) * size;
	struct rtw_cbuf *cbuf = (struct rtw_cbuf *)rtw_malloc(alloc_sz);

	if (!cbuf)
		return NULL;

	cbuf->write = 0;
	cbuf->read = 0;
	cbuf->size = size;

	return cbuf;
}
3339 
3340 /**
3341  * rtw_cbuf_free - free the given rtw_cbuf
3342  * @cbuf: pointer of struct rtw_cbuf to free
3343  */
rtw_cbuf_free(struct rtw_cbuf * cbuf)3344 void rtw_cbuf_free(struct rtw_cbuf *cbuf)
3345 {
3346 	rtw_mfree((u8 *)cbuf, sizeof(*cbuf) + sizeof(void *) * cbuf->size);
3347 }
3348 
3349 /**
3350  * map_readN - read a range of map data
3351  * @map: map to read
3352  * @offset: start address to read
3353  * @len: length to read
3354  * @buf: pointer of buffer to store data read
3355  *
3356  * Returns: _SUCCESS or _FAIL
3357  */
int map_readN(const struct map_t *map, u16 offset, u16 len, u8 *buf)
{
	const struct map_seg_t *seg;
	int ret = _FAIL;
	int i;

	if (len == 0) {
		rtw_warn_on(1);
		goto exit;
	}

	if (offset + len > map->len) {
		rtw_warn_on(1);
		goto exit;
	}

	/* unmapped holes read back as the map's initial value */
	_rtw_memset(buf, map->init_value, len);

	for (i = 0; i < map->seg_num; i++) {
		u8 *c_dst, *c_src;
		u16 c_len;

		seg = map->segs + i;
		/* skip segments that do not intersect [offset, offset + len) */
		if (seg->sa + seg->len <= offset || seg->sa >= offset + len)
			continue;

		/* clip the overlapping part of the segment into buf */
		if (seg->sa >= offset) {
			c_dst = buf + (seg->sa - offset);
			c_src = seg->c;
			if (seg->sa + seg->len <= offset + len)
				c_len = seg->len;
			else
				c_len = offset + len - seg->sa;
		} else {
			c_dst = buf;
			c_src = seg->c + (offset - seg->sa);
			if (seg->sa + seg->len >= offset + len)
				c_len = len;
			else
				c_len = seg->sa + seg->len - offset;
		}

		_rtw_memcpy(c_dst, c_src, c_len);
	}

	/* Fix: ret was never set on the success path, so this function
	 * always returned _FAIL even after a successful read. */
	ret = _SUCCESS;

exit:
	return ret;
}
3406 
3407 /**
3408  * map_read8 - read 1 byte of map data
3409  * @map: map to read
3410  * @offset: address to read
3411  *
3412  * Returns: value of data of specified offset. map.init_value if offset is out of range
3413  */
map_read8(const struct map_t * map,u16 offset)3414 u8 map_read8(const struct map_t *map, u16 offset)
3415 {
3416 	const struct map_seg_t *seg;
3417 	u8 val = map->init_value;
3418 	int i;
3419 
3420 	if (offset + 1 > map->len) {
3421 		rtw_warn_on(1);
3422 		goto exit;
3423 	}
3424 
3425 	for (i = 0; i < map->seg_num; i++) {
3426 		seg = map->segs + i;
3427 		if (seg->sa + seg->len <= offset || seg->sa >= offset + 1)
3428 			continue;
3429 
3430 		val = *(seg->c + offset - seg->sa);
3431 		break;
3432 	}
3433 
3434 exit:
3435 	return val;
3436 }
3437 
3438 #ifdef CONFIG_RTW_MESH
/*
 * Add @addr to blacklist @blist, expiring @timeout_ms from now, or
 * refresh the expiry if the address is already listed. Expired entries
 * encountered during the walk are garbage-collected.
 * Returns RTW_ALREADY if the address was present and not yet expired,
 * _SUCCESS on (re)insertion, _FAIL on allocation failure.
 */
int rtw_blacklist_add(_queue *blist, const u8 *addr, u32 timeout_ms)
{
	struct blacklist_ent *ent;
	_list *list, *head;
	u8 exist = _FALSE, timeout = _FALSE;

	enter_critical_bh(&blist->lock);

	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		/* advance before a possible delete of the current entry */
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			exist = _TRUE;
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
				timeout = _TRUE;
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			break;
		}

		/* opportunistically drop other expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	if (exist == _FALSE) {
		ent = rtw_malloc(sizeof(struct blacklist_ent));
		if (ent) {
			_rtw_memcpy(ent->addr, addr, ETH_ALEN);
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			rtw_list_insert_tail(&ent->list, head);
		}
	}

	exit_critical_bh(&blist->lock);

	return (exist == _TRUE && timeout == _FALSE) ? RTW_ALREADY : (ent ? _SUCCESS : _FAIL);
}
3482 
/*
 * Remove @addr from blacklist @blist. Expired entries encountered during
 * the walk are garbage-collected as a side effect.
 * Returns _SUCCESS if the address was found and removed, RTW_ALREADY
 * if it was not present.
 */
int rtw_blacklist_del(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *ent = NULL;
	_list *list, *head;
	u8 exist = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		/* advance before a possible delete of the current entry */
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
			exist = _TRUE;
			break;
		}

		/* opportunistically drop expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	exit_critical_bh(&blist->lock);

	return exist == _TRUE ? _SUCCESS : RTW_ALREADY;
}
3513 
/**
 * rtw_blacklist_search - check whether a MAC address is blacklisted
 * @blist: blacklist queue protected by its own lock
 * @addr: MAC address (ETH_ALEN bytes) to look up
 *
 * A matching but expired entry is freed and reported as not present.
 * Expired entries met during the walk are purged as a side effect.
 *
 * Returns _TRUE when a live (non-expired) entry matches, _FALSE otherwise.
 */
int rtw_blacklist_search(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *cur = NULL;
	_list *pos, *head;
	u8 found = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);

	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos); /* advance before any deletion below */

		if (_rtw_memcmp(cur->addr, addr, ETH_ALEN) == _TRUE) {
			if (rtw_time_after(rtw_get_current_time(), cur->exp_time)) {
				/* match, but already expired: purge and miss */
				rtw_list_delete(&cur->list);
				rtw_mfree(cur, sizeof(struct blacklist_ent));
			} else {
				found = _TRUE;
			}
			break;
		}

		/* opportunistically drop expired entries met on the way */
		if (rtw_time_after(rtw_get_current_time(), cur->exp_time)) {
			rtw_list_delete(&cur->list);
			rtw_mfree(cur, sizeof(struct blacklist_ent));
		}
	}

	exit_critical_bh(&blist->lock);

	return found;
}
3546 
/**
 * rtw_blacklist_flush - free every entry of a blacklist queue
 * @blist: blacklist queue protected by its own lock
 *
 * The whole list is detached onto a local staging head while holding
 * the lock, then the entries are freed outside the critical section
 * to keep the locked region short.
 */
void rtw_blacklist_flush(_queue *blist)
{
	struct blacklist_ent *cur;
	_list *pos, *head;
	_list staging;

	_rtw_init_listhead(&staging);

	/* detach all entries under the lock, free them afterwards */
	enter_critical_bh(&blist->lock);
	rtw_list_splice_init(&blist->queue, &staging);
	exit_critical_bh(&blist->lock);

	head = &staging;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos); /* advance before freeing the node */
		rtw_list_delete(&cur->list);
		rtw_mfree(cur, sizeof(struct blacklist_ent));
	}
}
3568 
/**
 * dump_blacklist - print every blacklist entry to the given output selector
 * @sel: output selector for RTW_PRINT_SEL
 * @blist: blacklist queue protected by its own lock
 * @title: optional header line printed only when the list is non-empty
 *
 * Expired entries are printed as such; live ones show remaining time in ms.
 * Entries are not modified or purged here.
 */
void dump_blacklist(void *sel, _queue *blist, const char *title)
{
	struct blacklist_ent *cur = NULL;
	_list *pos, *head;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);

	/* print the title only when there is at least one entry */
	if (rtw_end_of_queue_search(head, pos) == _FALSE) {
		if (title)
			RTW_PRINT_SEL(sel, "%s:\n", title);

		while (rtw_end_of_queue_search(head, pos) == _FALSE) {
			cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
			pos = get_next(pos);

			if (rtw_time_after(rtw_get_current_time(), cur->exp_time))
				RTW_PRINT_SEL(sel, MAC_FMT" expired\n", MAC_ARG(cur->addr));
			else
				RTW_PRINT_SEL(sel, MAC_FMT" %u\n", MAC_ARG(cur->addr)
					, rtw_get_remaining_time_ms(cur->exp_time));
		}

	}
	exit_critical_bh(&blist->lock);
}
3596 #endif
3597 
3598 /**
3599 * is_null -
3600 *
3601 * Return	TRUE if c is null character
3602 *		FALSE otherwise.
3603 */
is_null(char c)3604 inline BOOLEAN is_null(char c)
3605 {
3606 	if (c == '\0')
3607 		return _TRUE;
3608 	else
3609 		return _FALSE;
3610 }
3611 
is_all_null(char * c,int len)3612 inline BOOLEAN is_all_null(char *c, int len)
3613 {
3614 	for (; len > 0; len--)
3615 		if (c[len - 1] != '\0')
3616 			return _FALSE;
3617 
3618 	return _TRUE;
3619 }
3620 
3621 /**
3622 * is_eol -
3623 *
3624 * Return	TRUE if c is represent for EOL (end of line)
3625 *		FALSE otherwise.
3626 */
is_eol(char c)3627 inline BOOLEAN is_eol(char c)
3628 {
3629 	if (c == '\r' || c == '\n')
3630 		return _TRUE;
3631 	else
3632 		return _FALSE;
3633 }
3634 
3635 /**
3636 * is_space -
3637 *
3638 * Return	TRUE if c is represent for space
3639 *		FALSE otherwise.
3640 */
is_space(char c)3641 inline BOOLEAN is_space(char c)
3642 {
3643 	if (c == ' ' || c == '\t')
3644 		return _TRUE;
3645 	else
3646 		return _FALSE;
3647 }
3648 
3649 /**
3650 * is_decimal -
3651 *
3652 * Return	TRUE if chTmp is represent for decimal digit
3653 *		FALSE otherwise.
3654 */
is_decimal(char chTmp)3655 inline BOOLEAN is_decimal(char chTmp)
3656 {
3657 	if ((chTmp >= '0' && chTmp <= '9'))
3658 		return _TRUE;
3659 	else
3660 		return _FALSE;
3661 }
3662 
3663 /**
3664 * IsHexDigit -
3665 *
3666 * Return	TRUE if chTmp is represent for hex digit
3667 *		FALSE otherwise.
3668 */
IsHexDigit(char chTmp)3669 inline BOOLEAN IsHexDigit(char chTmp)
3670 {
3671 	if ((chTmp >= '0' && chTmp <= '9') ||
3672 		(chTmp >= 'a' && chTmp <= 'f') ||
3673 		(chTmp >= 'A' && chTmp <= 'F'))
3674 		return _TRUE;
3675 	else
3676 		return _FALSE;
3677 }
3678 
3679 /**
3680 * is_alpha -
3681 *
3682 * Return	TRUE if chTmp is represent for alphabet
3683 *		FALSE otherwise.
3684 */
is_alpha(char chTmp)3685 inline BOOLEAN is_alpha(char chTmp)
3686 {
3687 	if ((chTmp >= 'a' && chTmp <= 'z') ||
3688 		(chTmp >= 'A' && chTmp <= 'Z'))
3689 		return _TRUE;
3690 	else
3691 		return _FALSE;
3692 }
3693 
alpha_to_upper(char c)3694 inline char alpha_to_upper(char c)
3695 {
3696 	if ((c >= 'a' && c <= 'z'))
3697 		c = 'A' + (c - 'a');
3698 	return c;
3699 }
3700 
/**
 * hex2num_i - convert one ASCII hex digit to its numeric value
 *
 * Returns 0..15 for a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F'),
 * -1 otherwise.
 */
int hex2num_i(char c)
{
	int val = -1;

	if (c >= '0' && c <= '9')
		val = c - '0';
	else if (c >= 'a' && c <= 'f')
		val = 10 + (c - 'a');
	else if (c >= 'A' && c <= 'F')
		val = 10 + (c - 'A');

	return val;
}
3711 
/**
 * hex2byte_i - convert two ASCII hex digits to a byte value
 * @hex: points at two characters forming one byte, e.g. "3f"
 *
 * Returns 0..255 on success, -1 if either character is not a hex digit.
 */
int hex2byte_i(const char *hex)
{
	int hi, lo;

	/* hex2num_i is pure, so converting both nibbles up front is
	 * observably identical to short-circuiting on the first */
	hi = hex2num_i(hex[0]);
	lo = hex2num_i(hex[1]);
	if (hi < 0 || lo < 0)
		return -1;

	return (hi << 4) | lo;
}
3723 
/**
 * hexstr2bin - convert an ASCII hex string to binary
 * @hex: input string of at least 2*len hex digits
 * @buf: output buffer receiving len bytes
 * @len: number of output bytes to produce
 *
 * Returns 0 on success, -1 if any character pair is not valid hex
 * (buf may be partially written in that case).
 */
int hexstr2bin(const char *hex, u8 *buf, size_t len)
{
	size_t idx;
	const char *src = hex;
	u8 *dst = buf;

	for (idx = 0; idx < len; idx++) {
		int byte = hex2byte_i(src);

		if (byte < 0)
			return -1;
		*dst++ = byte;
		src += 2;
	}

	return 0;
}
3740 
3741 /**
3742  * hwaddr_aton - Convert ASCII string to MAC address
3743  * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
3744  * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
3745  * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
3746  */
hwaddr_aton_i(const char * txt,u8 * addr)3747 int hwaddr_aton_i(const char *txt, u8 *addr)
3748 {
3749 	int i;
3750 
3751 	for (i = 0; i < 6; i++) {
3752 		int a, b;
3753 
3754 		a = hex2num_i(*txt++);
3755 		if (a < 0)
3756 			return -1;
3757 		b = hex2num_i(*txt++);
3758 		if (b < 0)
3759 			return -1;
3760 		*addr++ = (a << 4) | b;
3761 		if (i < 5 && *txt++ != ':')
3762 			return -1;
3763 	}
3764 
3765 	return 0;
3766 }
3767 
3768