/******************************************************************************
 *
 * Copyright(c) 2016 - 2022 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <drv_types.h>
#include <rtw_mem.h>

#if defined(CONFIG_STACKTRACE) && \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
#define STACKTRACE 1
#define MAX_STACK_TRACE 4
#endif /* CONFIG_STACKTRACE */

#define TRACE_ORDER(a, b) ((a) && (((a) & BIT(b)) > 0))

#if (defined(CONFIG_RTKM) && defined(CONFIG_RTKM_STANDALONE))
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#undef RTW_PRINT
#define RTW_PRINT pr_info
#else /* !(CONFIG_RTKM && CONFIG_RTKM_STANDALONE) */
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "RTKM: " fmt
#endif /* CONFIG_RTKM && CONFIG_RTKM_STANDALONE */

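/*
 * Per-order pool bookkeeping: a list of all preallocated blocks (free
 * blocks kept toward the head, in-use blocks moved to the tail), an
 * rb-tree indexing the blocks currently handed out by their data
 * pointer, and counters (entries/used/peak) for the status dump.
 */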
struct phy_mem_list {
	_lock lock;
	_list list;
	struct rb_root rb_tree;
	unsigned short entries;
	unsigned short used;
	unsigned short peak;
	unsigned char order;
} rtkm_phy_list[MAX_ORDER];

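/*
 * One preallocated block: "data" points to pages obtained with
 * __get_free_pages() at the given order, and "size" records the size of
 * the allocation currently served from this block. When stack tracing
 * is enabled, the allocating call chain is stored for leak reports.
 */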
struct mem_entry {
	_list list;
	struct rb_node rbn;
	void *data;
	size_t size;
#ifdef STACKTRACE
#ifndef CONFIG_ARCH_STACKWALK
	struct stack_trace trace;
#endif /* CONFIG_ARCH_STACKWALK */
	unsigned long stack_entries[MAX_STACK_TRACE];
#endif /* STACKTRACE */
	unsigned char order;
	unsigned char is_use;
};

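/*
 * Thin wrappers hiding the difference between the stack_trace_save()
 * API (CONFIG_ARCH_STACKWALK) and the older save_stack_trace()
 * interface; they compile to nothing when stack tracing is unavailable.
 */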
#ifdef STACKTRACE
#ifdef CONFIG_ARCH_STACKWALK
static inline void stacktrace_print(const struct mem_entry *entries,
				    unsigned int nr_entries, int spaces)
{
	stack_trace_print(entries->stack_entries, nr_entries, spaces);
}

static inline int stacktrace_save(struct mem_entry *store)
{
	return stack_trace_save(store->stack_entries,
				ARRAY_SIZE(store->stack_entries), 1);
}
#else /* !CONFIG_ARCH_STACKWALK */
static inline void stacktrace_print(const struct mem_entry *entries,
				    unsigned int nr_entries, int spaces)
{
	stack_trace_print(entries->trace.entries, nr_entries, spaces);
}

static inline void stacktrace_save(struct mem_entry *store)
{
	store->trace.skip = 0;
	store->trace.nr_entries = 0;
	store->trace.entries = store->stack_entries;
	store->trace.max_entries = MAX_STACK_TRACE;
	save_stack_trace(&store->trace);
}
#endif /* CONFIG_ARCH_STACKWALK */
#else /* !STACKTRACE */
#define stacktrace_print(a, b, c)
#define stacktrace_save(a)
#endif /* STACKTRACE */

/* Trace mpool */
static unsigned int rtkm_trace;
module_param(rtkm_trace, uint, 0644);
MODULE_PARM_DESC(rtkm_trace, "Trace memory pool");

/* Preallocated memory expansion */
static bool rtkm_mem_exp = true;
module_param(rtkm_mem_exp, bool, 0644);
MODULE_PARM_DESC(rtkm_mem_exp, "Preallocated memory expansion");

#ifndef RTKM_MPOOL_0
#define RTKM_MPOOL_0 0
#endif
#ifndef RTKM_MPOOL_1
#ifdef CONFIG_RTW_REDUCE_MEM
#define RTKM_MPOOL_1 (2 * CONFIG_IFACE_NUMBER)
#else
#define RTKM_MPOOL_1 \
	(MAX_TX_RING_NUM * CONFIG_IFACE_NUMBER + CONFIG_IFACE_NUMBER + \
	 1 /* alloc_txring */)
#endif
#endif
#ifndef RTKM_MPOOL_2
#ifdef CONFIG_RTW_REDUCE_MEM
#define RTKM_MPOOL_2 2
#else
#define RTKM_MPOOL_2 0
#endif
#endif
#ifndef RTKM_MPOOL_3
#ifdef CONFIG_RTW_REDUCE_MEM
#define RTKM_MPOOL_3 2
#else
#ifdef CONFIG_PCI_HCI
#define RTKM_MPOOL_3 \
	(10 * CONFIG_IFACE_NUMBER /* sta_info */)
#else
#define RTKM_MPOOL_3 \
	(NR_XMITBUFF + NR_RECVBUFF + 10 * CONFIG_IFACE_NUMBER /* sta_info */)
#endif
#endif
#endif
#ifndef RTKM_MPOOL_4
#ifdef CONFIG_RTW_REDUCE_MEM
#define RTKM_MPOOL_4 (2 * CONFIG_IFACE_NUMBER)
#else
#define RTKM_MPOOL_4 0
#endif
#endif
#ifndef RTKM_MPOOL_5
#define RTKM_MPOOL_5 0
#endif

#ifndef RTKM_MPOOL_6
#define RTKM_MPOOL_6 0
#endif

#ifndef RTKM_MPOOL_7
#ifdef CONFIG_RTW_REDUCE_MEM
#define RTKM_MPOOL_7 0
#else
#define RTKM_MPOOL_7 (1 * CONFIG_IFACE_NUMBER)
#endif
#endif

#ifndef RTKM_MPOOL_8
#define RTKM_MPOOL_8 0
#endif
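/*
 * mpool[n] is the number of order-n blocks (PAGE_SIZE << n bytes each)
 * reserved when the module loads; it can be overridden via the "mpool"
 * module parameter array.
 */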
/* Preallocated memory pool */
static int mpool[MAX_ORDER] = {
	[0] = RTKM_MPOOL_0, [1] = RTKM_MPOOL_1, [2] = RTKM_MPOOL_2,
	[3] = RTKM_MPOOL_3, [4] = RTKM_MPOOL_4, [5] = RTKM_MPOOL_5,
	[6] = RTKM_MPOOL_6, [7] = RTKM_MPOOL_7, [8] = RTKM_MPOOL_8
};
static int n_mpool = 1;
module_param_array(mpool, int, &n_mpool, 0644);
MODULE_PARM_DESC(mpool, "Preallocated memory pool");

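/*
 * Print a per-order usage table (in-use, peak, allocated entries and
 * total bytes) followed by a summary row.
 */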
static inline void dump_mstatus(void *sel)
{
	int i;
	unsigned int a, b, c;
	unsigned long musage = 0;

	a = b = c = 0;

	RTW_PRINT_SEL(sel,
		      "====================== RTKM ======================\n");
	RTW_PRINT_SEL(sel, "%6s %10s %10s %10s %10s\n", "order", "use", "peak",
		      rtkm_mem_exp ? "alloc+" : "alloc", "size");
	RTW_PRINT_SEL(sel,
		      "--------------------------------------------------\n");
	for (i = 0; i < MAX_ORDER; i++) {
		if (rtkm_phy_list[i].entries) {
			RTW_PRINT_SEL(
				sel, "%6d %10d %10d %10d %10lu\n", i,
				rtkm_phy_list[i].used, rtkm_phy_list[i].peak,
				rtkm_phy_list[i].entries,
				(rtkm_phy_list[i].entries) * (PAGE_SIZE << i));
			a += rtkm_phy_list[i].used;
			b += rtkm_phy_list[i].peak;
			c += rtkm_phy_list[i].entries;
			musage += (rtkm_phy_list[i].entries) * (PAGE_SIZE << i);
		}
	}
	RTW_PRINT_SEL(sel, "%6s %10d %10d %10d %10lu\n", "sum", a, b, c,
		      musage);
}

void rtkm_dump_mstatus(void *sel)
{
	dump_mstatus(sel);
}
EXPORT_SYMBOL(rtkm_dump_mstatus);

void rtkm_set_trace(unsigned int mask)
{
	rtkm_trace = mask;
}
EXPORT_SYMBOL(rtkm_set_trace);

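/*
 * Insert an in-use entry into the order's rb-tree, keyed by its data
 * pointer, so rtkm_kfree() can find it without walking the whole list.
 */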
static void rb_insert_mem(struct phy_mem_list *mlist, struct mem_entry *entry)
{
	struct rb_node **p = &mlist->rb_tree.rb_node;
	struct rb_node *parent = NULL;
	struct mem_entry *tmp = NULL;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct mem_entry, rbn);

		if (tmp->data < entry->data)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&entry->rbn, parent, p);
	rb_insert_color(&entry->rbn, &mlist->rb_tree);
}

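/*
 * Look up the entry whose data pointer equals objp. The traversal order
 * is the mirror of the usual convention, but it matches rb_insert_mem(),
 * so insert and lookup stay consistent.
 */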
static struct mem_entry *rb_find_mem(struct phy_mem_list *mlist,
				     const void *objp)
{
	struct rb_node *n = mlist->rb_tree.rb_node;
	struct mem_entry *entry = NULL;

	while (n) {
		entry = rb_entry(n, struct mem_entry, rbn);

		if (entry->data == objp)
			return entry;
		else if (entry->data < objp)
			n = n->rb_left;
		else
			n = n->rb_right;
	}

	return NULL;
}

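/*
 * Allocate a tracking entry plus the backing pages for one order-sized
 * block. Uses GFP_ATOMIC when called from interrupt context.
 */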
static inline void *create_mem_entry(int order)
{
	struct mem_entry *entry;

	entry = _rtw_malloc(sizeof(struct mem_entry));
	if (entry == NULL) {
		pr_warn("%s: failed to allocate memory entry!\n", __func__);
		return NULL;
	}
	entry->order = order;
	entry->is_use = _FALSE;
	/* get memory by pages */
	entry->data = (void *)__get_free_pages(
		in_interrupt() ? GFP_ATOMIC : GFP_KERNEL, entry->order);
	if (entry->data == NULL) {
		_rtw_mfree(entry, sizeof(struct mem_entry));
		pr_warn("%s: failed to allocate order-%d pages!\n", __func__,
			order);
		return NULL;
	}

	return entry;
}

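/*
 * Serve an allocation of "size" bytes from the pool of the matching page
 * order. If the pool is exhausted and rtkm_mem_exp is set, the pool is
 * grown by one block; otherwise the request fails. The chosen entry is
 * marked in use, moved to the tail of the list and indexed in the
 * rb-tree.
 */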
static inline void *_kmalloc(size_t size, gfp_t flags, int clear)
{
	int order = 0;
	int warn = _FALSE;
	struct mem_entry *entry = NULL;

	order = get_order(size);

	if (rtkm_phy_list[order].entries == rtkm_phy_list[order].used) {
		if (rtkm_mem_exp) {
			warn = _TRUE;
			pr_warn("%s: Not enough order-%d pool\n", __func__,
				order);
			entry = create_mem_entry(order);
			if (entry) {
				_rtw_spinlock_bh(&rtkm_phy_list[order].lock);
				list_add_tail(
					&entry->list,
					&rtkm_phy_list[entry->order].list);
				rtkm_phy_list[entry->order].entries++;
				_rtw_spinunlock_bh(&rtkm_phy_list[order].lock);
			}
		}

		if (entry == NULL) {
			pr_warn("%s: No more memory for size %zu\n", __func__,
				size);
			WARN_ON(1);
			return NULL;
		}
	}

	_rtw_spinlock_bh(&rtkm_phy_list[order].lock);
	list_for_each_entry(entry, &rtkm_phy_list[order].list, list) {
		if (entry->is_use == _FALSE) {
			list_del_init(&entry->list);
			entry->is_use = _TRUE;
			entry->size = size;
			if (clear == _TRUE)
				memset(entry->data, 0, size);
			stacktrace_save(entry);
			rtkm_phy_list[order].used++;
			list_add_tail(&entry->list, &rtkm_phy_list[order].list);
			rb_insert_mem(&rtkm_phy_list[order], entry);
			break;
		}
	}
	if (rtkm_phy_list[order].peak < rtkm_phy_list[order].used)
		rtkm_phy_list[order].peak = rtkm_phy_list[order].used;
	_rtw_spinunlock_bh(&rtkm_phy_list[order].lock);

	if ((warn) || TRACE_ORDER(rtkm_trace, order)) {
		pr_info("%s: require(%p, %zu) usage(%d %u/%u)\n", __func__,
			entry->data, entry->size, order,
			rtkm_phy_list[order].used,
			rtkm_phy_list[order].entries);
		stacktrace_print(entry, MAX_STACK_TRACE, 0);
	}

	return entry->data;
}

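/*
 * Return a block to its pool: look it up in the rb-tree of the order
 * derived from "size", clear the in-use state and put it back at the
 * head of the free list. An entry that is not found is reported but
 * otherwise ignored.
 */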
static inline void _kfree(const void *objp, size_t size)
{
	int found = _FALSE, order = 0;
	struct mem_entry *entry;

	order = get_order(size);

	if (!list_empty(&rtkm_phy_list[order].list)) {
		_rtw_spinlock_bh(&rtkm_phy_list[order].lock);

		entry = rb_find_mem(&rtkm_phy_list[order], objp);
		if (entry && (entry->is_use == _TRUE) &&
		    (entry->data == objp)) {
			if (TRACE_ORDER(rtkm_trace, order)) {
				pr_info("%s: release(%p, %zu)\n", __func__,
					objp, size);
			}
			rtw_list_delete(&entry->list);
			rtkm_phy_list[order].used--;
			entry->is_use = _FALSE;
			entry->size = 0;
			rb_erase(&entry->rbn, &rtkm_phy_list[order].rb_tree);
			list_add(&entry->list, &rtkm_phy_list[order].list);
			found = _TRUE;
		}

		_rtw_spinunlock_bh(&rtkm_phy_list[order].lock);
	}
	if (found == _FALSE) {
		pr_warn("%s: not found (%p, %zu)\n", __func__, objp, size);
	}
}

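/*
 * Exported kmalloc/kzalloc/kfree replacements: requests larger than
 * RTKM_MGMT_SIZE are served from the preallocated pools, smaller ones
 * fall through to the regular slab allocator.
 */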
void *rtkm_kmalloc(size_t size, gfp_t flags)
{
	if (size > RTKM_MGMT_SIZE)
		return _kmalloc(size, flags, _FALSE);
	else
		return kmalloc(size, flags);
}
EXPORT_SYMBOL(rtkm_kmalloc);

void *rtkm_kzalloc(size_t size, gfp_t flags)
{
	if (size > RTKM_MGMT_SIZE)
		return _kmalloc(size, flags, _TRUE);
	else
		return kzalloc(size, flags);
}
EXPORT_SYMBOL(rtkm_kzalloc);

void rtkm_kfree(const void *objp, size_t size)
{
	if (size > RTKM_MGMT_SIZE)
		return _kfree(objp, size);
	else
		return kfree(objp);
}
EXPORT_SYMBOL(rtkm_kfree);

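/*
 * Build the pools at load time: initialise the per-order locks, lists
 * and rb-trees, then preallocate mpool[order] blocks for each order.
 */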
static inline int rtkm_init_phy(void)
{
	int ret = 0, i, j;
	struct mem_entry *entry;

	pr_info("%s\n", __func__);
	pr_info("%s: memory expansion:%d\n", __func__, rtkm_mem_exp);

	for (i = (MAX_ORDER - 1); i > -1; i--) {
		INIT_LIST_HEAD(&rtkm_phy_list[i].list);
		_rtw_spinlock_init(&rtkm_phy_list[i].lock);
		rtkm_phy_list[i].rb_tree = RB_ROOT;

		for (j = 0; (ret == 0) && (j < mpool[i]); j++) {
			entry = create_mem_entry(i);
			if (entry == NULL) {
				ret = -ENOMEM;
				break;
			}

			list_add_tail(&entry->list,
				      &rtkm_phy_list[entry->order].list);
			rtkm_phy_list[entry->order].entries++;
		}
	}
	if (ret == 0)
		dump_mstatus(RTW_DBGDUMP);

	return ret;
}

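/*
 * Tear the pools down: report any blocks still marked in use (including
 * their saved allocation stacks), free the backing pages and the
 * tracking entries, and release the per-order locks.
 */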
static inline void rtkm_destroy_phy(void)
{
	int i = 0;
	struct mem_entry *entry;

	pr_info("%s\n", __func__);
	dump_mstatus(RTW_DBGDUMP);

	for (i = 0; i < MAX_ORDER; i++) {
		if (rtkm_phy_list[i].used)
			pr_err("%s: memory leak! order=%d num=%d\n", __func__,
			       i, rtkm_phy_list[i].used);

		if (rtkm_phy_list[i].rb_tree.rb_node != NULL)
			pr_err("%s: rb tree leak! order=%d\n", __func__, i);

		while (!list_empty(&rtkm_phy_list[i].list)) {
			entry = list_entry(rtkm_phy_list[i].list.next,
					   struct mem_entry, list);
			list_del_init(&entry->list);
			if (entry->is_use == _TRUE) {
				rb_erase(&entry->rbn,
					 &rtkm_phy_list[i].rb_tree);
				pr_err("%s: memory leak! (%p, %zu)\n", __func__,
				       entry->data, entry->size);
				stacktrace_print(entry, MAX_STACK_TRACE, 0);
			}
			if (entry->data)
				free_pages((unsigned long)(entry->data),
					   entry->order);
			entry->data = NULL;
			entry->size = 0;
			entry->is_use = _FALSE;
			_rtw_mfree(entry, sizeof(struct mem_entry));
			entry = NULL;
			rtkm_phy_list[i].entries--;
		}
		_rtw_spinlock_free(&rtkm_phy_list[i].lock);
	}
}

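/*
 * Module-level entry points used to set up and tear down the
 * preallocated pools at load and unload time.
 */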
int rtkm_prealloc_init(void)
{
	int ret = 0;

	pr_info("%s\n", __func__);

	ret = rtkm_init_phy();
	if (ret == -ENOMEM) {
		pr_err("Not enough memory for physical pool.\n");
		rtkm_destroy_phy();
	}

	pr_info("%s: done ret=%d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL(rtkm_prealloc_init);

void rtkm_prealloc_destroy(void)
{
	pr_info("%s\n", __func__);

	rtkm_destroy_phy();

	pr_info("%s: done\n", __func__);
}
EXPORT_SYMBOL(rtkm_prealloc_destroy);