xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852be/os_dep/osdep_service.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2019 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _OSDEP_SERVICE_C_
16 
17 #include <drv_types.h>
18 
19 #ifdef CONFIG_HWSIM
20 #include "rtw_hwsim_intf.h"
/* Deliver a received skb to the network stack; give the hwsim medium a
 * chance to inspect/modify the frame first. */
inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
	skb->dev = ndev;
	rtw_hwsim_medium_pre_netif_rx(skb);
	return netif_rx(skb);
}
27 #endif /* CONFIG_HWSIM */
28 
rtw_atoi(u8 * s)29 u32 rtw_atoi(u8 *s)
30 {
31 
32 	int num = 0, flag = 0;
33 	int i;
34 	for (i = 0; i <= strlen(s); i++) {
35 		if (s[i] >= '0' && s[i] <= '9')
36 			num = num * 10 + s[i] - '0';
37 		else if (s[0] == '-' && i == 0)
38 			flag = 1;
39 		else
40 			break;
41 	}
42 
43 	if (flag == 1)
44 		num = num * -1;
45 
46 	return num;
47 
48 }
49 
50 #if defined(DBG_MEM_ALLOC)
51 
52 struct rtw_mem_stat {
53 	ATOMIC_T alloc; /* the memory bytes we allocate currently */
54 	ATOMIC_T peak; /* the peak memory bytes we allocate */
55 	ATOMIC_T alloc_cnt; /* the alloc count for alloc currently */
56 	ATOMIC_T alloc_err_cnt; /* the error times we fail to allocate memory */
57 };
58 
59 struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
60 #ifdef RTW_MEM_FUNC_STAT
61 struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
62 #endif
63 
64 char *MSTAT_TYPE_str[] = {
65 	"VIR",
66 	"PHY",
67 	"SKB",
68 	"USB",
69 };
70 
71 #ifdef RTW_MEM_FUNC_STAT
72 char *MSTAT_FUNC_str[] = {
73 	"UNSP",
74 	"IO",
75 	"TXIO",
76 	"RXIO",
77 	"TX",
78 	"RX",
79 };
80 #endif
81 
rtw_mstat_dump(void * sel)82 void rtw_mstat_dump(void *sel)
83 {
84 	int i;
85 	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
86 #ifdef RTW_MEM_FUNC_STAT
87 	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
88 #endif
89 
90 	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
91 		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
92 		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
93 		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
94 		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
95 	}
96 
97 #ifdef RTW_MEM_FUNC_STAT
98 	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
99 		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
100 		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
101 		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
102 		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
103 	}
104 #endif
105 
106 	RTW_PRINT_SEL(sel, "===================== MSTAT =====================\n");
107 	RTW_PRINT_SEL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "aloc_cnt", "err_cnt");
108 	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
109 	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++)
110 		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
111 #ifdef RTW_MEM_FUNC_STAT
112 	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
113 	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++)
114 		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
115 #endif
116 }
117 
/* Update the accounting counters for one allocation event.
 * @flags:  encodes both the memory type and (optionally) the caller class
 * @status: MSTAT_ALLOC_SUCCESS / MSTAT_ALLOC_FAIL / MSTAT_FREE
 * @sz:     number of bytes involved in the event
 *
 * NOTE(review): the lazy zeroing keyed off 'update_time' is not race-free
 * if two CPUs enter before the first call completes — preserved from the
 * original; confirm first use is serialized (e.g. during driver init).
 */
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static systime update_time = 0;
	int peak, alloc;
	int i;

	/* first call: reset every counter */
	if (!update_time) {
		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
		#ifdef RTW_MEM_FUNC_STAT
		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
		#endif
	}

	switch (status) {
	case MSTAT_ALLOC_SUCCESS:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		/* track the high-water mark (read-then-set; not atomic as a pair) */
		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
		#endif
		break;

	case MSTAT_ALLOC_FAIL:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
		#endif
		break;

	case MSTAT_FREE:
		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		#endif
		break;
	}

	/* if (rtw_get_passing_time_ms(update_time) > 5000) { */
	/*	rtw_mstat_dump(RTW_DBGDUMP); */
	update_time = rtw_get_current_time();
	/* } */
}
181 
182 #ifndef SIZE_MAX
183 #define SIZE_MAX (~(size_t)0)
184 #endif
185 
186 struct mstat_sniff_rule {
187 	enum mstat_f flags;
188 	size_t lb;
189 	size_t hb;
190 };
191 
192 struct mstat_sniff_rule mstat_sniff_rules[] = {
193 	{MSTAT_TYPE_VIR, 32, 32},
194 };
195 
196 int mstat_sniff_rule_num = sizeof(mstat_sniff_rules) / sizeof(struct mstat_sniff_rule);
197 
match_mstat_sniff_rules(const enum mstat_f flags,const size_t size)198 bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
199 {
200 	int i;
201 	for (i = 0; i < mstat_sniff_rule_num; i++) {
202 		if (mstat_sniff_rules[i].flags == flags
203 			&& mstat_sniff_rules[i].lb <= size
204 			&& mstat_sniff_rules[i].hb >= size)
205 			return _TRUE;
206 	}
207 
208 	return _FALSE;
209 }
210 
/* Accounting wrapper around _rtw_vmalloc(); logs sniffed sizes. */
inline void *dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_vmalloc(sz);

	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
228 
/* Accounting wrapper around _rtw_zvmalloc() (zeroed vmalloc). */
inline void *dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_zvmalloc(sz);

	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
246 
/* Accounting wrapper around _rtw_vmfree(). */
inline void dbg_rtw_vmfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_vmfree(pbuf, sz);

	rtw_mstat_update(flags, MSTAT_FREE, sz);
}
261 
/* Accounting wrapper around _rtw_malloc(). */
inline void *dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_malloc(sz);

	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
279 
/* Accounting wrapper around _rtw_zmalloc() (zeroed kmalloc). */
inline void *dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	ptr = _rtw_zmalloc(sz);

	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, sz);

	return ptr;
}
297 
/* Accounting wrapper around _rtw_mfree(). */
inline void dbg_rtw_mfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_mfree(pbuf, sz);

	rtw_mstat_update(flags, MSTAT_FREE, sz);
}
311 
dbg_rtw_skb_mstat_aid(struct sk_buff * skb_head,const enum mstat_f flags,enum mstat_status status)312 inline void dbg_rtw_skb_mstat_aid(struct sk_buff *skb_head, const enum mstat_f flags, enum mstat_status status)
313 {
314 	unsigned int truesize = 0;
315 	struct sk_buff *skb;
316 
317 	if (!skb_head)
318 		return;
319 
320 	rtw_mstat_update(flags, status, skb_head->truesize);
321 
322 	skb_walk_frags(skb_head, skb)
323 		rtw_mstat_update(flags, status, skb->truesize);
324 }
325 
dbg_rtw_skb_alloc(unsigned int size,const enum mstat_f flags,const char * func,int line)326 inline struct sk_buff *dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
327 {
328 	struct sk_buff *skb;
329 	unsigned int truesize = 0;
330 
331 	skb = _rtw_skb_alloc(size);
332 
333 	if (skb)
334 		truesize = skb->truesize;
335 
336 	if (!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
337 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);
338 
339 	rtw_mstat_update(
340 		flags
341 		, skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
342 		, truesize
343 	);
344 
345 	return skb;
346 }
347 
dbg_rtw_skb_free(struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)348 inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
349 {
350 	unsigned int truesize = skb->truesize;
351 
352 	if (match_mstat_sniff_rules(flags, truesize))
353 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
354 
355 	dbg_rtw_skb_mstat_aid(skb, flags, MSTAT_FREE);
356 
357 	_rtw_skb_free(skb);
358 }
359 
dbg_rtw_skb_copy(const struct sk_buff * skb,const enum mstat_f flags,const char * func,const int line)360 inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
361 {
362 	struct sk_buff *skb_cp;
363 	unsigned int truesize = skb->truesize;
364 	unsigned int cp_truesize = 0;
365 
366 	skb_cp = _rtw_skb_copy(skb);
367 	if (skb_cp)
368 		cp_truesize = skb_cp->truesize;
369 
370 	if (!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
371 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);
372 
373 	rtw_mstat_update(
374 		flags
375 		, skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
376 		, cp_truesize
377 	);
378 
379 	return skb_cp;
380 }
381 
dbg_rtw_skb_clone(struct sk_buff * skb,const enum mstat_f flags,const char * func,const int line)382 inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
383 {
384 	struct sk_buff *skb_cl;
385 	unsigned int truesize = skb->truesize;
386 	unsigned int cl_truesize = 0;
387 
388 	skb_cl = _rtw_skb_clone(skb);
389 	if (skb_cl)
390 		cl_truesize = skb_cl->truesize;
391 
392 	if (!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
393 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);
394 
395 	rtw_mstat_update(
396 		flags
397 		, skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
398 		, cl_truesize
399 	);
400 
401 	return skb_cl;
402 }
403 
dbg_rtw_skb_linearize(struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)404 inline int dbg_rtw_skb_linearize(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
405 {
406 	unsigned int truesize = 0;
407 	int ret;
408 
409 	dbg_rtw_skb_mstat_aid(skb, flags, MSTAT_FREE);
410 
411 	ret = _rtw_skb_linearize(skb);
412 
413 	dbg_rtw_skb_mstat_aid(skb, flags, MSTAT_ALLOC_SUCCESS);
414 
415 	return ret;
416 }
417 
/* Accounting wrapper around _rtw_netif_rx(); the skb is handed to the
 * network stack, so it is accounted as freed here. */
inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	unsigned int truesize = skb->truesize;
	int ret;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	ret = _rtw_netif_rx(ndev, skb);

	rtw_mstat_update(flags, MSTAT_FREE, truesize);

	return ret;
}
436 
437 #ifdef CONFIG_RTW_NAPI
/* Accounting wrapper around _rtw_netif_receive_skb(); ownership of the
 * skb passes to the stack, so it is accounted as freed here. */
inline int dbg_rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	unsigned int truesize = skb->truesize;
	int ret;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	ret = _rtw_netif_receive_skb(ndev, skb);

	rtw_mstat_update(flags, MSTAT_FREE, truesize);

	return ret;
}
456 
457 #ifdef CONFIG_RTW_GRO
dbg_rtw_napi_gro_receive(struct napi_struct * napi,struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)458 inline gro_result_t dbg_rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
459 {
460 	int ret;
461 	unsigned int truesize = skb->truesize;
462 
463 	if (match_mstat_sniff_rules(flags, truesize))
464 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
465 
466 	ret = _rtw_napi_gro_receive(napi, skb);
467 
468 	rtw_mstat_update(
469 		flags
470 		, MSTAT_FREE
471 		, truesize
472 	);
473 
474 	return ret;
475 }
476 #endif /* CONFIG_RTW_GRO */
477 #endif /* CONFIG_RTW_NAPI */
478 
dbg_rtw_skb_queue_purge(struct sk_buff_head * list,enum mstat_f flags,const char * func,int line)479 inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
480 {
481 	struct sk_buff *skb;
482 
483 	while ((skb = skb_dequeue(list)) != NULL)
484 		dbg_rtw_skb_free(skb, flags, func, line);
485 }
486 
487 #ifdef CONFIG_USB_HCI
/* Accounting wrapper around _rtw_usb_buffer_alloc() (coherent USB DMA). */
inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
{
	void *ptr;

	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	ptr = _rtw_usb_buffer_alloc(dev, size, dma);

	rtw_mstat_update(flags, ptr ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL, size);

	return ptr;
}
505 
/* Accounting wrapper around _rtw_usb_buffer_free(). */
inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
{
	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	_rtw_usb_buffer_free(dev, size, addr, dma);

	rtw_mstat_update(flags, MSTAT_FREE, size);
}
520 #endif /* CONFIG_USB_HCI */
521 
522 #endif /* defined(DBG_MEM_ALLOC) */
523 
/* Allocate an h x w matrix of elements of 'size' bytes as one block:
 * h row pointers first, then the h*w payload immediately behind them.
 * Returns NULL on allocation failure; free with rtw_mfree2d().
 * NOTE(review): h * w * size can overflow on 32-bit for huge inputs —
 * callers are expected to pass sane dimensions; confirm at call sites. */
void *rtw_malloc2d(int h, int w, size_t size)
{
	void **rows;
	int r;

	rows = (void **)rtw_zmalloc(h * sizeof(void *) + h * w * size);
	if (rows == NULL) {
		RTW_INFO("%s: alloc memory fail!\n", __FUNCTION__);
		return NULL;
	}

	/* point each row into the payload area behind the pointer table */
	for (r = 0; r < h; r++)
		rows[r] = ((char *)(rows + h)) + r * w * size;

	return rows;
}
539 
rtw_mfree2d(void * pbuf,int h,int w,int size)540 void rtw_mfree2d(void *pbuf, int h, int w, int size)
541 {
542 	rtw_mfree((u8 *)pbuf, h * sizeof(void *) + w * h * size);
543 }
544 
/* memcmp-style three-way compare: returns 0 when the first sz bytes are
 * equal, otherwise the (unsigned) difference of the first mismatching pair. */
int _rtw_memcmp2(const void *dst, const void *src, u32 sz)
{
	const unsigned char *a = dst;
	const unsigned char *b = src;

	for (; sz > 0; sz--, a++, b++) {
		if (*a != *b)
			return *a - *b;
	}

	return 0;
}
562 
/* Initialize a queue: empty list head plus its protecting spinlock. */
void _rtw_init_queue(_queue *pqueue)
{
	_rtw_init_listhead(&pqueue->queue);
	_rtw_spinlock_init(&pqueue->lock);
}
568 
/* Tear down a queue's spinlock (the list itself needs no cleanup). */
void _rtw_deinit_queue(_queue *pqueue)
{
	_rtw_spinlock_free(&pqueue->lock);
}
573 
_rtw_queue_empty(_queue * pqueue)574 u32 _rtw_queue_empty(_queue	*pqueue)
575 {
576 	return rtw_is_list_empty(&(pqueue->queue));
577 }
578 
579 
rtw_end_of_queue_search(_list * head,_list * plist)580 u32 rtw_end_of_queue_search(_list *head, _list *plist)
581 {
582 	if (head == plist)
583 		return _TRUE;
584 	else
585 		return _FALSE;
586 }
587 
588 /* the input parameter start use the same unit as returned by rtw_get_current_time */
_rtw_get_passing_time_ms(systime start)589 inline s32 _rtw_get_passing_time_ms(systime start)
590 {
591 	return _rtw_systime_to_ms(_rtw_get_current_time() - start);
592 }
593 
_rtw_get_remaining_time_ms(systime end)594 inline s32 _rtw_get_remaining_time_ms(systime end)
595 {
596 	return _rtw_systime_to_ms(end - _rtw_get_current_time());
597 }
598 
_rtw_get_time_interval_ms(systime start,systime end)599 inline s32 _rtw_get_time_interval_ms(systime start, systime end)
600 {
601 	return _rtw_systime_to_ms(end - start);
602 }
603 
rtw_macaddr_is_larger(const u8 * a,const u8 * b)604 bool rtw_macaddr_is_larger(const u8 *a, const u8 *b)
605 {
606 	u32 va, vb;
607 
608 	va = be32_to_cpu(*((u32 *)a));
609 	vb = be32_to_cpu(*((u32 *)b));
610 	if (va > vb)
611 		return 1;
612 	else if (va < vb)
613 		return 0;
614 
615 	return be16_to_cpu(*((u16 *)(a + 4))) > be16_to_cpu(*((u16 *)(b + 4)));
616 }
617 
618 
619 /*
620 * Test if the specifi @param path is a readable file with valid size.
621 * If readable, @param sz is got
622 * @param path the path of the file to test
623 * @return _TRUE or _FALSE
624 */
/* _TRUE when 'path' is readable and no larger than 'sz' bytes. */
int rtw_readable_file_sz_chk(const char *path, u32 sz)
{
	u32 fsz;

	if (rtw_is_file_readable_with_size(path, &fsz) == _FALSE)
		return _FALSE;

	return (fsz <= sz) ? _TRUE : _FALSE;
}
637 
/* rtw_buf_free - release a (pointer, length) buffer pair.
 * @buf:     address of the buffer pointer; set to NULL after freeing
 * @buf_len: address of the length; zeroed before the free
 *
 * Safe no-op when either argument is NULL or *buf is already NULL.
 * Fix: removed the unused local 'ori_len' present in the original.
 */
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
	if (!buf || !buf_len)
		return;

	if (*buf) {
		u32 len = *buf_len;

		/* clear the length first so no reader ever pairs a stale
		 * length with a freed pointer */
		*buf_len = 0;
		rtw_mfree(*buf, len);
		*buf = NULL;
	}
}
654 
/* rtw_buf_update - replace the (pointer, length) pair with a copy of src.
 * When src is NULL/empty — or the duplicate allocation fails — the old
 * buffer is still released and the pair becomes (NULL, 0). */
void rtw_buf_update(u8 **buf, u32 *buf_len, const u8 *src, u32 src_len)
{
	u8 *old = NULL, *copy = NULL;
	u32 old_len = 0, copy_len = 0;

	if (!buf || !buf_len)
		return;

	if (!src || !src_len)
		goto keep_ori;

	/* duplicate src */
	copy = rtw_malloc(src_len);
	if (copy) {
		copy_len = src_len;
		_rtw_memcpy(copy, src, copy_len);
	}

keep_ori:
	old = *buf;
	old_len = *buf_len;

	/* install the duplicate (or NULL on failure) */
	*buf_len = 0;
	*buf = copy;
	*buf_len = copy_len;

	/* release the previous buffer */
	if (old && old_len > 0)
		rtw_mfree(old, old_len);
}
687 
688 
689 /**
690  * rtw_cbuf_full - test if cbuf is full
691  * @cbuf: pointer of struct rtw_cbuf
692  *
693  * Returns: _TRUE if cbuf is full
694  */
rtw_cbuf_full(struct rtw_cbuf * cbuf)695 inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
696 {
697 	return (cbuf->write == cbuf->read - 1) ? _TRUE : _FALSE;
698 }
699 
700 /**
701  * rtw_cbuf_empty - test if cbuf is empty
702  * @cbuf: pointer of struct rtw_cbuf
703  *
704  * Returns: _TRUE if cbuf is empty
705  */
rtw_cbuf_empty(struct rtw_cbuf * cbuf)706 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
707 {
708 	return (cbuf->write == cbuf->read) ? _TRUE : _FALSE;
709 }
710 
711 /**
712  * rtw_cbuf_push - push a pointer into cbuf
713  * @cbuf: pointer of struct rtw_cbuf
714  * @buf: pointer to push in
715  *
716  * Lock free operation, be careful of the use scheme
717  * Returns: _TRUE push success
718  */
rtw_cbuf_push(struct rtw_cbuf * cbuf,void * buf)719 bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
720 {
721 	if (rtw_cbuf_full(cbuf))
722 		return _FAIL;
723 
724 	if (0)
725 		RTW_INFO("%s on %u\n", __func__, cbuf->write);
726 	cbuf->bufs[cbuf->write] = buf;
727 	cbuf->write = (cbuf->write + 1) % cbuf->size;
728 
729 	return _SUCCESS;
730 }
731 
732 /**
733  * rtw_cbuf_pop - pop a pointer from cbuf
734  * @cbuf: pointer of struct rtw_cbuf
735  *
736  * Lock free operation, be careful of the use scheme
737  * Returns: pointer popped out
738  */
rtw_cbuf_pop(struct rtw_cbuf * cbuf)739 void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
740 {
741 	void *buf;
742 	if (rtw_cbuf_empty(cbuf))
743 		return NULL;
744 
745 	if (0)
746 		RTW_INFO("%s on %u\n", __func__, cbuf->read);
747 	buf = cbuf->bufs[cbuf->read];
748 	cbuf->read = (cbuf->read + 1) % cbuf->size;
749 
750 	return buf;
751 }
752 
753 /**
754  * rtw_cbuf_alloc - allocte a rtw_cbuf with given size and do initialization
755  * @size: size of pointer
756  *
757  * Returns: pointer of srtuct rtw_cbuf, NULL for allocation failure
758  */
/**
 * rtw_cbuf_alloc - allocate a rtw_cbuf holding 'size' pointer slots
 * @size: number of pointer slots
 *
 * Returns: pointer to the initialized rtw_cbuf, NULL on allocation failure
 */
struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
	struct rtw_cbuf *cbuf;

	cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
	if (!cbuf)
		return NULL;

	cbuf->write = 0;
	cbuf->read = 0;
	cbuf->size = size;
	return cbuf;
}
772 
773 /**
774  * rtw_cbuf_free - free the given rtw_cbuf
775  * @cbuf: pointer of struct rtw_cbuf to free
776  */
rtw_cbuf_free(struct rtw_cbuf * cbuf)777 void rtw_cbuf_free(struct rtw_cbuf *cbuf)
778 {
779 	rtw_mfree((u8 *)cbuf, sizeof(*cbuf) + sizeof(void *) * cbuf->size);
780 }
781 
782 /**
783  * map_readN - read a range of map data
784  * @map: map to read
785  * @offset: start address to read
786  * @len: length to read
787  * @buf: pointer of buffer to store data read
788  *
789  * Returns: _SUCCESS or _FAIL
790  */
/**
 * map_readN - read a range of map data
 * @map: map to read
 * @offset: start address to read
 * @len: length to read
 * @buf: pointer of buffer to store data read
 *
 * Bytes not covered by any segment are filled with map->init_value.
 *
 * Returns: _SUCCESS or _FAIL
 *
 * Fix: the original initialized ret to _FAIL and never set it on the
 * success path, so the function returned _FAIL even when the read
 * completed correctly.
 */
int map_readN(const struct map_t *map, u16 offset, u16 len, u8 *buf)
{
	const struct map_seg_t *seg;
	int ret = _FAIL;
	int i;

	if (len == 0) {
		rtw_warn_on(1);
		goto exit;
	}

	if (offset + len > map->len) {
		rtw_warn_on(1);
		goto exit;
	}

	/* default value for bytes not covered by any segment */
	_rtw_memset(buf, map->init_value, len);

	for (i = 0; i < map->seg_num; i++) {
		u8 *c_dst, *c_src;
		u16 c_len;

		seg = map->segs + i;
		/* skip segments that do not intersect [offset, offset + len) */
		if (seg->sa + seg->len <= offset || seg->sa >= offset + len)
			continue;

		if (seg->sa >= offset) {
			/* segment starts inside the requested window */
			c_dst = buf + (seg->sa - offset);
			c_src = seg->c;
			if (seg->sa + seg->len <= offset + len)
				c_len = seg->len;
			else
				c_len = offset + len - seg->sa;
		} else {
			/* segment starts before the window; copy the overlap */
			c_dst = buf;
			c_src = seg->c + (offset - seg->sa);
			if (seg->sa + seg->len >= offset + len)
				c_len = len;
			else
				c_len = seg->sa + seg->len - offset;
		}

		_rtw_memcpy(c_dst, c_src, c_len);
	}

	ret = _SUCCESS;

exit:
	return ret;
}
839 
840 /**
841  * map_read8 - read 1 byte of map data
842  * @map: map to read
843  * @offset: address to read
844  *
845  * Returns: value of data of specified offset. map.init_value if offset is out of range
846  */
map_read8(const struct map_t * map,u16 offset)847 u8 map_read8(const struct map_t *map, u16 offset)
848 {
849 	const struct map_seg_t *seg;
850 	u8 val = map->init_value;
851 	int i;
852 
853 	if (offset + 1 > map->len) {
854 		rtw_warn_on(1);
855 		goto exit;
856 	}
857 
858 	for (i = 0; i < map->seg_num; i++) {
859 		seg = map->segs + i;
860 		if (seg->sa + seg->len <= offset || seg->sa >= offset + 1)
861 			continue;
862 
863 		val = *(seg->c + offset - seg->sa);
864 		break;
865 	}
866 
867 exit:
868 	return val;
869 }
870 
/* Add (or refresh) a MAC address on the blacklist with the given timeout.
 * Expired entries encountered during the scan are reclaimed on the fly.
 * Returns RTW_ALREADY when a live entry was merely refreshed, _SUCCESS
 * when the address was (re)armed, _FAIL on allocation failure. */
int rtw_blacklist_add(_queue *blist, const u8 *addr, u32 timeout_ms)
{
	struct blacklist_ent *ent = NULL;
	_list *pos, *head;
	u8 exist = _FALSE, timeout = _FALSE;

	_rtw_spinlock_bh(&blist->lock);

	head = &blist->queue;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		ent = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			/* refresh the existing entry's expiry */
			exist = _TRUE;
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
				timeout = _TRUE;
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			break;
		}

		/* opportunistically reclaim expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	if (exist == _FALSE) {
		ent = rtw_malloc(sizeof(struct blacklist_ent));
		if (ent) {
			_rtw_memcpy(ent->addr, addr, ETH_ALEN);
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			rtw_list_insert_tail(&ent->list, head);
		}
	}

	_rtw_spinunlock_bh(&blist->lock);

	if (exist == _TRUE)
		return (timeout == _FALSE) ? RTW_ALREADY : _SUCCESS;
	return ent ? _SUCCESS : _FAIL;
}
914 
/* Remove a MAC address from the blacklist, reclaiming expired entries
 * seen along the way.  Returns _SUCCESS when found, RTW_ALREADY if the
 * address was not present. */
int rtw_blacklist_del(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *ent;
	_list *pos, *head;
	u8 found = _FALSE;

	_rtw_spinlock_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		ent = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
			found = _TRUE;
			break;
		}

		/* opportunistically reclaim expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	_rtw_spinunlock_bh(&blist->lock);

	return (found == _TRUE) ? _SUCCESS : RTW_ALREADY;
}
945 
/* Test whether a MAC address is currently blacklisted.  A matching but
 * expired entry is reclaimed and reported as not present; other expired
 * entries seen during the scan are reclaimed too. */
int rtw_blacklist_search(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *ent;
	_list *pos, *head;
	u8 found = _FALSE;

	_rtw_spinlock_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		ent = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
				/* matching entry already expired: drop it */
				rtw_list_delete(&ent->list);
				rtw_mfree(ent, sizeof(struct blacklist_ent));
			} else
				found = _TRUE;
			break;
		}

		/* opportunistically reclaim expired entries */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	_rtw_spinunlock_bh(&blist->lock);

	return found;
}
978 
/* Drop every entry on the blacklist.  Entries are first spliced onto a
 * private list under the lock, then freed outside it. */
void rtw_blacklist_flush(_queue *blist)
{
	struct blacklist_ent *ent;
	_list *pos, *head;
	_list doomed;

	_rtw_init_listhead(&doomed);

	/* detach the whole list while holding the lock */
	_rtw_spinlock_bh(&blist->lock);
	rtw_list_splice_init(&blist->queue, &doomed);
	_rtw_spinunlock_bh(&blist->lock);

	head = &doomed;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		ent = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos);
		rtw_list_delete(&ent->list);
		rtw_mfree(ent, sizeof(struct blacklist_ent));
	}
}
1000 
/* Print the blacklist (one line per entry) under the optional title.
 * Entries past their expiry are shown as "expired"; live ones show their
 * remaining lifetime in ms.  Nothing is printed for an empty list. */
void dump_blacklist(void *sel, _queue *blist, const char *title)
{
	struct blacklist_ent *ent;
	_list *pos, *head;

	_rtw_spinlock_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);

	if (rtw_end_of_queue_search(head, pos) == _FALSE) {
		if (title)
			RTW_PRINT_SEL(sel, "%s:\n", title);

		while (rtw_end_of_queue_search(head, pos) == _FALSE) {
			ent = LIST_CONTAINOR(pos, struct blacklist_ent, list);
			pos = get_next(pos);

			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
				RTW_PRINT_SEL(sel, MAC_FMT" expired\n", MAC_ARG(ent->addr));
			else
				RTW_PRINT_SEL(sel, MAC_FMT" %u\n", MAC_ARG(ent->addr)
					, rtw_get_remaining_time_ms(ent->exp_time));
		}

	}
	_rtw_spinunlock_bh(&blist->lock);
}
1028 
1029 /**
1030 * is_null -
1031 *
1032 * Return	TRUE if c is null character
1033 *		FALSE otherwise.
1034 */
is_null(char c)1035 inline BOOLEAN is_null(char c)
1036 {
1037 	if (c == '\0')
1038 		return _TRUE;
1039 	else
1040 		return _FALSE;
1041 }
1042 
is_all_null(char * c,int len)1043 inline BOOLEAN is_all_null(char *c, int len)
1044 {
1045 	for (; len > 0; len--)
1046 		if (c[len - 1] != '\0')
1047 			return _FALSE;
1048 
1049 	return _TRUE;
1050 }
1051 
1052 /**
1053 * is_eol -
1054 *
1055 * Return	TRUE if c is represent for EOL (end of line)
1056 *		FALSE otherwise.
1057 */
is_eol(char c)1058 inline BOOLEAN is_eol(char c)
1059 {
1060 	if (c == '\r' || c == '\n')
1061 		return _TRUE;
1062 	else
1063 		return _FALSE;
1064 }
1065 
1066 /**
1067 * is_space -
1068 *
1069 * Return	TRUE if c is represent for space
1070 *		FALSE otherwise.
1071 */
is_space(char c)1072 inline BOOLEAN is_space(char c)
1073 {
1074 	if (c == ' ' || c == '\t')
1075 		return _TRUE;
1076 	else
1077 		return _FALSE;
1078 }
1079 
1080 /**
1081 * is_decimal -
1082 *
1083 * Return	TRUE if chTmp is represent for decimal digit
1084 *		FALSE otherwise.
1085 */
is_decimal(char chTmp)1086 inline BOOLEAN is_decimal(char chTmp)
1087 {
1088 	if ((chTmp >= '0' && chTmp <= '9'))
1089 		return _TRUE;
1090 	else
1091 		return _FALSE;
1092 }
1093 
1094 /**
1095 * IsHexDigit -
1096 *
1097 * Return	TRUE if chTmp is represent for hex digit
1098 *		FALSE otherwise.
1099 */
IsHexDigit(char chTmp)1100 inline BOOLEAN IsHexDigit(char chTmp)
1101 {
1102 	if ((chTmp >= '0' && chTmp <= '9') ||
1103 		(chTmp >= 'a' && chTmp <= 'f') ||
1104 		(chTmp >= 'A' && chTmp <= 'F'))
1105 		return _TRUE;
1106 	else
1107 		return _FALSE;
1108 }
1109 
1110 /**
1111 * is_alpha -
1112 *
1113 * Return	TRUE if chTmp is represent for alphabet
1114 *		FALSE otherwise.
1115 */
is_alpha(char chTmp)1116 inline BOOLEAN is_alpha(char chTmp)
1117 {
1118 	if ((chTmp >= 'a' && chTmp <= 'z') ||
1119 		(chTmp >= 'A' && chTmp <= 'Z'))
1120 		return _TRUE;
1121 	else
1122 		return _FALSE;
1123 }
1124 
/* Return the upper-case form of an ASCII lower-case letter; any other
 * character is returned unchanged. */
inline char alpha_to_upper(char c)
{
	return (c >= 'a' && c <= 'z') ? (char)('A' + (c - 'a')) : c;
}
1131 
/* Convert one hex digit to its value 0..15, or -1 for a non-hex character. */
int hex2num_i(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return 10 + (c - 'a');
	if (c >= 'A' && c <= 'F')
		return 10 + (c - 'A');
	return -1;
}
1142 
/* Convert two hex digits at 'hex' to a byte value 0..255, or -1 when
 * either character is not a valid hex digit. */
int hex2byte_i(const char *hex)
{
	int hi = hex2num_i(hex[0]);
	int lo = hex2num_i(hex[1]);

	if (hi < 0 || lo < 0)
		return -1;

	return (hi << 4) | lo;
}
1154 
/* Decode 2*len hex characters from 'hex' into 'len' bytes at 'buf'.
 * Returns 0 on success, -1 on the first invalid digit pair (in which
 * case earlier bytes of buf have already been written). */
int hexstr2bin(const char *hex, u8 *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		int byte = hex2byte_i(hex + 2 * i);

		if (byte < 0)
			return -1;
		buf[i] = (u8)byte;
	}

	return 0;
}
1171 
1172