xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852bs/core/rtw_prealloc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2016 - 2022 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 
16 #include <linux/kernel.h>
17 #include <linux/gfp.h>
18 #include <drv_types.h>
19 #include <rtw_mem.h>
20 
21 #if defined(CONFIG_STACKTRACE) &&                                              \
22 	(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
23 #define STACKTRACE 1
24 #define MAX_STACK_TRACE 4
25 #endif /* CONFIG_STACKTRACE */
26 
27 #define TRACE_ORDER(a, b) ((a) && (((a)&BIT(b)) > 0))
28 
29 #if (defined(CONFIG_RTKM) && defined(CONFIG_RTKM_STANDALONE))
30 #ifdef pr_fmt
31 #undef pr_fmt
32 #endif
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #undef RTW_PRINT
36 #define RTW_PRINT pr_info
37 #else /* !CONFIG_RTKM */
38 #ifdef pr_fmt
39 #undef pr_fmt
40 #endif
41 #define pr_fmt(fmt) "RTKM: " fmt
42 #endif /* CONFIG_RTKM */
43 
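/*
 * Per-order bookkeeping for the preallocated pool: a list of all entries of
 * that order (free and in use), an rb-tree of the entries currently handed
 * out, and usage counters.
 */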
44 struct phy_mem_list {
45 	_lock lock;
46 	_list list;
47 	struct rb_root rb_tree;
48 	unsigned short entries;
49 	unsigned short used;
50 	unsigned short peak;
51 	unsigned char order;
52 } rtkm_phy_list[MAX_ORDER];
53 
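/*
 * One preallocated block of 2^order pages plus its tracking metadata,
 * including the allocation call stack when STACKTRACE is available.
 */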
54 struct mem_entry {
55 	_list list;
56 	struct rb_node rbn;
57 	void *data;
58 	size_t size;
59 #ifdef STACKTRACE
60 #ifndef CONFIG_ARCH_STACKWALK
61 	struct stack_trace trace;
62 #endif /* CONFIG_ARCH_STACKWALK */
63 	unsigned long stack_entries[MAX_STACK_TRACE];
64 #endif /* STACKTRACE */
65 	unsigned char order;
66 	unsigned char is_use;
67 };
68 
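/*
 * Thin wrappers that save/print the allocation call stack: stack_trace_save()
 * on CONFIG_ARCH_STACKWALK kernels, the older save_stack_trace() API
 * otherwise; no-ops when STACKTRACE is not set.
 */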
69 #ifdef STACKTRACE
70 #ifdef CONFIG_ARCH_STACKWALK
71 static inline void stacktrace_print(const struct mem_entry *entries,
72 				    unsigned int nr_entries, int spaces)
73 {
74 	stack_trace_print(entries->stack_entries, nr_entries, spaces);
75 }
76 
77 static inline int stacktrace_save(struct mem_entry *store)
78 {
79 	return stack_trace_save(store->stack_entries,
80 				ARRAY_SIZE(store->stack_entries), 1);
81 }
82 #else /* !CONFIG_ARCH_STACKWALK */
83 static inline void stacktrace_print(const struct mem_entry *entries,
84 				    unsigned int nr_entries, int spaces)
85 {
86 	stack_trace_print(entries->trace.entries, nr_entries, spaces);
87 }
88 
89 static inline void stacktrace_save(struct mem_entry *store)
90 {
91 	store->trace.skip = 0;
92 	store->trace.nr_entries = 0;
93 	store->trace.entries = store->stack_entries;
94 	store->trace.max_entries = MAX_STACK_TRACE;
95 	save_stack_trace(&store->trace);
96 }
97 #endif /* CONFIG_ARCH_STACKWALK */
98 #else /* !STACKTRACE */
99 #define stacktrace_print(a, b, c)
100 #define stacktrace_save(a)
101 #endif /* STACKTRACE */
102 
103 /* Trace mpool: bitmask, BIT(order) enables tracing for that allocation order */
104 static unsigned int rtkm_trace = 0;
105 module_param(rtkm_trace, uint, 0644);
106 MODULE_PARM_DESC(rtkm_trace, "Trace memory pool");
107 
108 /* Preallocated memory expansion */
109 static bool rtkm_mem_exp = true;
110 module_param(rtkm_mem_exp, bool, 0644);
111 MODULE_PARM_DESC(rtkm_mem_exp, "Preallocated memory expansion");
112 
113 #ifndef RTKM_MPOOL_0
114 #define RTKM_MPOOL_0 0
115 #endif
116 #ifndef RTKM_MPOOL_1
117 #define RTKM_MPOOL_1 0
118 #endif
119 #ifndef RTKM_MPOOL_2
120 #define RTKM_MPOOL_2 0
121 #endif
122 #ifndef RTKM_MPOOL_3
123 #define RTKM_MPOOL_3 0
124 #endif
125 #ifndef RTKM_MPOOL_4
126 #define RTKM_MPOOL_4 0
127 #endif
128 #ifndef RTKM_MPOOL_5
129 #define RTKM_MPOOL_5 0
130 #endif
131 #ifndef RTKM_MPOOL_6
132 #define RTKM_MPOOL_6 0
133 #endif
134 #ifndef RTKM_MPOOL_7
135 #define RTKM_MPOOL_7 0
136 #endif
137 #ifndef RTKM_MPOOL_8
138 #define RTKM_MPOOL_8 0
139 #endif
140 
141 /* Preallocated memory pool: number of entries to reserve per allocation order */
142 static int mpool[MAX_ORDER] = {
143 	[0] = RTKM_MPOOL_0, [1] = RTKM_MPOOL_1, [2] = RTKM_MPOOL_2,
144 	[3] = RTKM_MPOOL_3, [4] = RTKM_MPOOL_4, [5] = RTKM_MPOOL_5,
145 	[6] = RTKM_MPOOL_6, [7] = RTKM_MPOOL_7, [8] = RTKM_MPOOL_8
146 };
147 static int n_mpool = 1;
148 module_param_array(mpool, int, &n_mpool, 0644);
149 MODULE_PARM_DESC(mpool, "Preallocated memory pool");
150 
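/* Print a per-order table of pool usage (in use, peak, total entries, bytes). */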
151 static inline void dump_mstatus(void *sel)
152 {
153 	int i;
154 	unsigned int a, b, c;
155 	unsigned long musage = 0;
156 
157 	a = b = c = 0;
158 
159 	RTW_PRINT_SEL(sel,
160 		      "====================== RTKM ======================\n");
161 	RTW_PRINT_SEL(sel, "%6s %10s %10s %10s %10s\n", "order", "use", "peak",
162 		      rtkm_mem_exp ? "alloc+" : "alloc", "size");
163 	RTW_PRINT_SEL(sel,
164 		      "--------------------------------------------------\n");
165 	for (i = 0; i < MAX_ORDER; i++) {
166 		if (rtkm_phy_list[i].entries) {
167 			RTW_PRINT_SEL(
168 				sel, "%6d %10d %10d %10d %10lu\n", i,
169 				rtkm_phy_list[i].used, rtkm_phy_list[i].peak,
170 				rtkm_phy_list[i].entries,
171 				(rtkm_phy_list[i].entries) * (PAGE_SIZE << i));
172 			a += rtkm_phy_list[i].used;
173 			b += rtkm_phy_list[i].peak;
174 			c += rtkm_phy_list[i].entries;
175 			musage += (rtkm_phy_list[i].entries) * (PAGE_SIZE << i);
176 		}
177 	}
178 	RTW_PRINT_SEL(sel, "%6s %10d %10d %10d %10lu\n", "sum", a, b, c,
179 		      musage);
180 }
181 
182 void rtkm_dump_mstatus(void *sel)
183 {
184 	dump_mstatus(sel);
185 }
186 EXPORT_SYMBOL(rtkm_dump_mstatus);
187 
188 void rtkm_set_trace(unsigned int mask)
189 {
190 	rtkm_trace = mask;
191 }
192 EXPORT_SYMBOL(rtkm_set_trace);
193 
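/*
 * Insert an in-use entry into the per-order rb-tree, keyed by its page
 * address. Both rb_insert_mem() and rb_find_mem() descend left when the
 * node's address is lower than the key (the reverse of the usual
 * convention), so the two must stay consistent.
 */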
194 static void rb_insert_mem(struct phy_mem_list *mlist, struct mem_entry *entry)
195 {
196 	struct rb_node **p = &mlist->rb_tree.rb_node;
197 	struct rb_node *parent = NULL;
198 	struct mem_entry *tmp = NULL;
199 
200 	while (*p) {
201 		parent = *p;
202 		tmp = rb_entry(parent, struct mem_entry, rbn);
203 
204 		if (tmp->data < entry->data)
205 			p = &(*p)->rb_left;
206 		else
207 			p = &(*p)->rb_right;
208 	}
209 
210 	rb_link_node(&entry->rbn, parent, p);
211 	rb_insert_color(&entry->rbn, &mlist->rb_tree);
212 }
213 
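/* Look up the in-use entry that owns the pointer previously handed to the caller. */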
214 static struct mem_entry *rb_find_mem(struct phy_mem_list *mlist,
215 				     const void *objp)
216 {
217 	struct rb_node *n = mlist->rb_tree.rb_node;
218 	struct mem_entry *entry = NULL;
219 
220 	while (n) {
221 		entry = rb_entry(n, struct mem_entry, rbn);
222 
223 		if (entry->data == objp)
224 			return entry;
225 		else if (entry->data < objp)
226 			n = n->rb_left;
227 		else
228 			n = n->rb_right;
229 	}
230 
231 	return NULL;
232 }
233 
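/* Allocate a tracking entry plus an order-N block of free pages for it. */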
234 static inline void *create_mem_entry(int order)
235 {
236 	struct mem_entry *entry;
237 
238 	entry = _rtw_malloc(sizeof(struct mem_entry));
239 	if (entry == NULL) {
240 		pr_warn("%s: alloc memory entry fail!\n", __func__);
241 		return NULL;
242 	}
243 	entry->order = order;
244 	entry->is_use = _FALSE;
245 	/* get memory by pages */
246 	entry->data = (void *)__get_free_pages(
247 		in_interrupt() ? GFP_ATOMIC : GFP_KERNEL, entry->order);
248 	if (entry->data == NULL) {
249 		_rtw_mfree(entry, sizeof(struct mem_entry));
250 		pr_warn("%s: alloc memory order-%d fail!\n", __func__, order);
251 		return NULL;
252 	}
253 
254 	return entry;
255 }
256 
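/*
 * Serve an allocation from the per-order pool: grab a free entry of the
 * matching order, optionally growing the pool by one entry (rtkm_mem_exp)
 * when everything is already in use.
 */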
257 static inline void *_kmalloc(size_t size, gfp_t flags, int clear)
258 {
259 	int order = 0;
260 	int warn = _FALSE;
261 	struct mem_entry *entry = NULL;
262 
263 	order = get_order(size);
264 
265 	if (rtkm_phy_list[order].entries == rtkm_phy_list[order].used) {
266 		if (rtkm_mem_exp) {
267 			warn = _TRUE;
268 			pr_warn("%s: Not enough order-%d pool\n", __func__,
269 				order);
270 			entry = create_mem_entry(order);
271 			if (entry) {
272 				_rtw_spinlock_bh(&rtkm_phy_list[order].lock);
273 				list_add_tail(
274 					&entry->list,
275 					&rtkm_phy_list[entry->order].list);
276 				rtkm_phy_list[entry->order].entries++;
277 				_rtw_spinunlock_bh(&rtkm_phy_list[order].lock);
278 			}
279 		}
280 
281 		if (entry == NULL) {
282 			pr_warn("%s: No more memory for size %zu\n", __func__,
283 				size);
284 			WARN_ON(1);
285 			return NULL;
286 		}
287 	}
288 
289 	_rtw_spinlock_bh(&rtkm_phy_list[order].lock);
290 	list_for_each_entry (entry, &rtkm_phy_list[order].list, list) {
291 		if (entry->is_use == _FALSE) {
292 			list_del_init(&entry->list);
293 			entry->is_use = _TRUE;
294 			entry->size = size;
295 			if (clear == _TRUE)
296 				memset(entry->data, 0, size);
297 			stacktrace_save(entry);
298 			rtkm_phy_list[order].used++;
299 			list_add_tail(&entry->list, &rtkm_phy_list[order].list);
300 			rb_insert_mem(&rtkm_phy_list[order], entry);
301 			break;
302 		}
303 	}
304 	if (rtkm_phy_list[order].peak < rtkm_phy_list[order].used)
305 		rtkm_phy_list[order].peak = rtkm_phy_list[order].used;
306 	_rtw_spinunlock_bh(&rtkm_phy_list[order].lock);
307 
308 	if ((warn) || TRACE_ORDER(rtkm_trace, order)) {
309 		pr_info("%s: require(%p, %zu) usage(%d %u/%u)\n", __func__,
310 			entry->data, entry->size, order,
311 			rtkm_phy_list[order].used,
312 			rtkm_phy_list[order].entries);
313 		stacktrace_print(entry, MAX_STACK_TRACE, 0);
314 	}
315 
316 	return entry->data;
317 }
318 
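/*
 * Return a block to the pool. The entry is located via the rb-tree using
 * the pointer and the order derived from the caller-supplied size.
 */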
319 static inline void _kfree(const void *objp, size_t size)
320 {
321 	int found = _FALSE, order = 0;
322 	struct mem_entry *entry;
323 
324 	order = get_order(size);
325 
326 	if (!list_empty(&rtkm_phy_list[order].list)) {
327 		_rtw_spinlock_bh(&rtkm_phy_list[order].lock);
328 
329 		entry = rb_find_mem(&rtkm_phy_list[order], objp);
330 		if (entry && (entry->is_use == _TRUE) &&
331 		    (entry->data == objp)) {
332 			if (TRACE_ORDER(rtkm_trace, order)) {
333 				pr_info("%s: release(%p, %zu)\n", __func__,
334 					objp, size);
335 			}
336 			rtw_list_delete(&entry->list);
337 			rtkm_phy_list[order].used--;
338 			entry->is_use = _FALSE;
339 			entry->size = 0;
340 			rb_erase(&entry->rbn, &rtkm_phy_list[order].rb_tree);
341 			list_add(&entry->list, &rtkm_phy_list[order].list);
342 			found = _TRUE;
343 		}
344 
345 		_rtw_spinunlock_bh(&rtkm_phy_list[order].lock);
346 	}
347 	if (found == _FALSE) {
348 		pr_warn("%s: not found (%p, %zu)\n", __func__, objp, size);
349 	}
350 }
351 
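/*
 * Exported allocator entry points: sizes above RTKM_MGMT_SIZE are served
 * from the preallocated page pool, smaller requests fall back to the
 * regular slab allocator.
 */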
352 void *rtkm_kmalloc(size_t size, gfp_t flags)
353 {
354 	if (size > RTKM_MGMT_SIZE)
355 		return _kmalloc(size, flags, _FALSE);
356 	else
357 		return kmalloc(size, flags);
358 }
359 EXPORT_SYMBOL(rtkm_kmalloc);
360 
361 void *rtkm_kzalloc(size_t size, gfp_t flags)
362 {
363 	if (size > RTKM_MGMT_SIZE)
364 		return _kmalloc(size, flags, _TRUE);
365 	else
366 		return kzalloc(size, flags);
367 }
368 EXPORT_SYMBOL(rtkm_kzalloc);
369 
370 void rtkm_kfree(const void *objp, size_t size)
371 {
372 	if (size > RTKM_MGMT_SIZE)
373 		_kfree(objp, size);
374 	else
375 		kfree(objp);
376 }
377 EXPORT_SYMBOL(rtkm_kfree);
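
/*
 * Illustrative usage sketch (not part of the driver): callers are expected
 * to free with the same size they allocated, since the pool derives the
 * page order from it. Names and sizes below are hypothetical.
 *
 *	void *buf = rtkm_kzalloc(32 * 1024, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	rtkm_kfree(buf, 32 * 1024);
 */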
378 
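/*
 * Build the per-order pools from the mpool[] module parameter counts and
 * report the initial status.
 */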
379 static inline int rtkm_init_phy(void)
380 {
381 	int ret = 0, i, j;
382 	struct mem_entry *entry;
383 
384 	pr_info("%s\n", __func__);
385 	pr_info("%s: memory expansion:%d\n", __func__, rtkm_mem_exp);
386 
387 	for (i = (MAX_ORDER - 1); i > -1; i--) {
388 		INIT_LIST_HEAD(&rtkm_phy_list[i].list);
389 		_rtw_spinlock_init(&rtkm_phy_list[i].lock);
390 		rtkm_phy_list[i].rb_tree = RB_ROOT;
391 
392 		for (j = 0; (ret == 0) && (j < mpool[i]); j++) {
393 			entry = create_mem_entry(i);
394 			if (entry == NULL) {
395 				ret = -ENOMEM;
396 				break;
397 			}
398 
399 			list_add_tail(&entry->list,
400 				      &rtkm_phy_list[entry->order].list);
401 			rtkm_phy_list[entry->order].entries++;
402 		}
403 	}
404 	if (ret == 0)
405 		dump_mstatus(RTW_DBGDUMP);
406 
407 	return ret;
408 }
409 
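/*
 * Tear down all pools, reporting any entries still marked in use (and any
 * stale rb-tree nodes) as leaks before freeing their pages.
 */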
410 static inline void rtkm_destroy_phy(void)
411 {
412 	int i = 0;
413 	struct mem_entry *entry;
414 
415 	pr_info("%s\n", __func__);
416 	dump_mstatus(RTW_DBGDUMP);
417 
418 	for (i = 0; i < MAX_ORDER; i++) {
419 		if (rtkm_phy_list[i].used)
420 			pr_err("%s: memory leak! order=%d num=%d\n", __func__,
421 			       i, rtkm_phy_list[i].used);
422 
423 		if (rtkm_phy_list[i].rb_tree.rb_node != NULL)
424 			pr_err("%s: rb tree leak! order=%d\n", __func__, i);
425 
426 		while (!list_empty(&rtkm_phy_list[i].list)) {
427 			entry = list_entry(rtkm_phy_list[i].list.next,
428 					   struct mem_entry, list);
429 			list_del_init(&entry->list);
430 			if (entry->is_use == _TRUE) {
431 				rb_erase(&entry->rbn,
432 					 &rtkm_phy_list[i].rb_tree);
433 				pr_err("%s: memory leak! (%p, %zu)\n", __func__,
434 				       entry->data, entry->size);
435 				stacktrace_print(entry, MAX_STACK_TRACE, 0);
436 			}
437 			if (entry->data)
438 				free_pages((unsigned long)(entry->data),
439 					   entry->order);
440 			entry->data = NULL;
441 			entry->size = 0;
442 			entry->is_use = _FALSE;
443 			_rtw_mfree(entry, sizeof(struct mem_entry));
444 			entry = NULL;
445 			rtkm_phy_list[i].entries--;
446 		}
447 		_rtw_spinlock_free(&rtkm_phy_list[i].lock);
448 	}
449 }
450 
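/* Module-facing init: set up the preallocated pools, clean up on failure. */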
451 int rtkm_prealloc_init(void)
452 {
453 	int ret = 0;
454 
455 	pr_info("%s\n", __func__);
456 
457 	ret = rtkm_init_phy();
458 	if (ret == -ENOMEM) {
459 		pr_err("Not enough memory for physical pool.\n");
460 		rtkm_destroy_phy();
461 	}
462 
463 	pr_info("%s: done ret=%d\n", __func__, ret);
464 	return ret;
465 }
466 EXPORT_SYMBOL(rtkm_prealloc_init);
467 
468 void rtkm_prealloc_destroy(void)
469 {
470 	pr_info("%s\n", __func__);
471 
472 	rtkm_destroy_phy();
473 
474 	pr_info("%s: done\n", __func__);
475 }
476 EXPORT_SYMBOL(rtkm_prealloc_destroy);
477