xref: /OK3568_Linux_fs/kernel/drivers/soc/rockchip/minidump/minidump_log.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2023 Rockchip Electronics Co., Ltd.
5  */
6 
7 #include <linux/cache.h>
8 #include <linux/freezer.h>
9 #include <linux/bitops.h>
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/kallsyms.h>
14 #include <linux/rbtree.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/thread_info.h>
18 #include <soc/rockchip/rk_minidump.h>
19 #include <asm/page.h>
20 #include <asm/memory.h>
21 #include <asm/sections.h>
22 #include <asm/stacktrace.h>
23 #include <linux/mm.h>
24 #include <linux/ratelimit.h>
25 #include <linux/notifier.h>
26 #include <linux/sizes.h>
27 #include <linux/sched/task.h>
28 #include <linux/suspend.h>
29 #include <linux/vmalloc.h>
30 #include <linux/android_debug_symbols.h>
31 #include <linux/elf.h>
32 #include <linux/seq_buf.h>
33 #include <linux/elfcore.h>
34 #include "minidump_private.h"
35 
36 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
37 #include <linux/bits.h>
38 #include <linux/sched/prio.h>
39 #include <asm/memory.h>
40 
41 #include "../../../kernel/sched/sched.h"
42 
43 #include <linux/kdebug.h>
44 #include <linux/thread_info.h>
45 #include <asm/ptrace.h>
46 #include <linux/uaccess.h>
47 #include <linux/percpu.h>
48 
49 #include <linux/module.h>
50 #include <linux/cma.h>
51 #include <linux/dma-map-ops.h>
52 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
53 #include <trace/hooks/debug.h>
54 #endif
55 #include "minidump_memory.h"
56 #endif	/* CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP */
57 
58 #ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
59 
60 #include <trace/events/sched.h>
61 
62 #ifdef CONFIG_VMAP_STACK
63 #define STACK_NUM_PAGES (THREAD_SIZE / PAGE_SIZE)
64 #else
65 #define STACK_NUM_PAGES 1
66 #endif	/* !CONFIG_VMAP_STACK */
67 
68 struct md_stack_cpu_data {
69 	int stack_mdidx[STACK_NUM_PAGES];
70 	struct md_region stack_mdr[STACK_NUM_PAGES];
71 } ____cacheline_aligned_in_smp;
72 
73 static int md_current_stack_init __read_mostly;
74 
75 static DEFINE_PER_CPU_SHARED_ALIGNED(struct md_stack_cpu_data, md_stack_data);
76 
77 struct md_suspend_context_data {
78 	int task_mdno;
79 	int stack_mdidx[STACK_NUM_PAGES];
80 	struct md_region stack_mdr[STACK_NUM_PAGES];
81 	struct md_region task_mdr;
82 	bool init;
83 };
84 
85 static struct md_suspend_context_data md_suspend_context;
86 #endif	/* CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK */
87 
88 static bool is_vmap_stack __read_mostly;
89 
90 #ifdef CONFIG_ROCKCHIP_MINIDUMP_FTRACE
91 #include <trace/hooks/ftrace_dump.h>
92 #include <linux/ring_buffer.h>
93 
94 #define MD_FTRACE_BUF_SIZE	SZ_2M
95 
96 static char *md_ftrace_buf_addr;
97 static size_t md_ftrace_buf_current;
98 static bool minidump_ftrace_in_oops;
99 static bool minidump_ftrace_dump = true;
100 #endif
101 
102 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
103 /* Runqueue information */
104 #define MD_RUNQUEUE_PAGES	8
105 
106 static bool md_in_oops_handler;
107 static struct seq_buf *md_runq_seq_buf;
108 static int md_align_offset;
109 
110 /* CPU context information */
111 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
112 #define MD_CPU_CNTXT_PAGES	32
113 
114 static int die_cpu = -1;
115 static struct seq_buf *md_cntxt_seq_buf;
116 #endif
117 
118 /* Meminfo */
119 static struct seq_buf *md_meminfo_seq_buf;
120 
121 /* Slabinfo */
122 #ifdef CONFIG_SLUB_DEBUG
123 static struct seq_buf *md_slabinfo_seq_buf;
124 #endif
125 
126 #ifdef CONFIG_PAGE_OWNER
127 size_t md_pageowner_dump_size = SZ_2M;
128 char *md_pageowner_dump_addr;
129 #endif
130 
131 #ifdef CONFIG_SLUB_DEBUG
132 size_t md_slabowner_dump_size = SZ_2M;
133 char *md_slabowner_dump_addr;
134 #endif
135 
136 size_t md_dma_buf_info_size = SZ_256K;
137 char *md_dma_buf_info_addr;
138 
139 size_t md_dma_buf_procs_size = SZ_256K;
140 char *md_dma_buf_procs_addr;
141 
142 /* Modules information */
143 #ifdef CONFIG_MODULES
144 #define MD_MODULE_PAGES	  8
145 static struct seq_buf *md_mod_info_seq_buf;
146 static DEFINE_SPINLOCK(md_modules_lock);
147 #endif	/* CONFIG_MODULES */
148 #endif
149 
150 static struct md_region note_md_entry;
151 static DEFINE_PER_CPU_SHARED_ALIGNED(struct elf_prstatus *, cpu_epr);
152 
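/*
 * Describe one chunk of a kernel stack (a single page for vmap'd stacks,
 * the whole THREAD_SIZE block otherwise) and add it to the minidump table.
 * vmap'd stacks are not linearly mapped, so the physical address has to be
 * resolved via vmalloc_to_page(); virt_to_phys() is only valid otherwise.
 * Returns the region index, or a negative value on failure.
 */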
153 static int register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size)
154 {
155 	struct page *sp_page;
156 	int entry;
157 
158 	ksp_entry->virt_addr = sp;
159 	ksp_entry->size = size;
160 	if (is_vmap_stack) {
161 		sp_page = vmalloc_to_page((const void *) sp);
162 		ksp_entry->phys_addr = page_to_phys(sp_page);
163 	} else {
164 		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
165 	}
166 
167 	entry = rk_minidump_add_region(ksp_entry);
168 	if (entry < 0)
169 		pr_err("Failed to add stack of entry %s in Minidump\n",
170 				ksp_entry->name);
171 	return entry;
172 }
173 
174 #ifdef CONFIG_ANDROID_DEBUG_SYMBOLS
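/*
 * Register the kernel data/bss range, the ro-after-init range and each
 * possible CPU's static per-cpu area.  Section boundaries are obtained at
 * runtime through android_debug_symbol() rather than from the linker
 * symbols directly (presumably so no extra symbol exports are needed).
 */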
175 static void register_kernel_sections(void)
176 {
177 	struct md_region ksec_entry;
178 	char *data_name = "KDATABSS";
179 	char *rodata_name = "KROAIDATA";
180 	size_t static_size;
181 	void __percpu *base;
182 	unsigned int cpu;
183 	void *_sdata, *__bss_stop;
184 	void *start_ro, *end_ro;
185 
186 	_sdata = android_debug_symbol(ADS_SDATA);
187 	__bss_stop = android_debug_symbol(ADS_BSS_END);
188 	base = android_debug_symbol(ADS_PER_CPU_START);
189 	static_size = (size_t)(android_debug_symbol(ADS_PER_CPU_END) - base);
190 
191 	strscpy(ksec_entry.name, data_name, sizeof(ksec_entry.name));
192 	ksec_entry.virt_addr = (u64)_sdata;
193 	ksec_entry.phys_addr = virt_to_phys(_sdata);
194 	ksec_entry.size = roundup((__bss_stop - _sdata), 4);
195 	if (rk_minidump_add_region(&ksec_entry) < 0)
196 		pr_err("Failed to add data section in Minidump\n");
197 
198 	start_ro = android_debug_symbol(ADS_START_RO_AFTER_INIT);
199 	end_ro = android_debug_symbol(ADS_END_RO_AFTER_INIT);
200 	strscpy(ksec_entry.name, rodata_name, sizeof(ksec_entry.name));
201 	ksec_entry.virt_addr = (uintptr_t)start_ro;
202 	ksec_entry.phys_addr = virt_to_phys(start_ro);
203 	ksec_entry.size = roundup((end_ro - start_ro), 4);
204 	if (rk_minidump_add_region(&ksec_entry) < 0)
205 		pr_err("Failed to add rodata section in Minidump\n");
206 
207 	/* Add percpu static sections */
208 	for_each_possible_cpu(cpu) {
209 		void *start = per_cpu_ptr(base, cpu);
210 
211 		memset(&ksec_entry, 0, sizeof(ksec_entry));
212 		scnprintf(ksec_entry.name, sizeof(ksec_entry.name),
213 			"KSPERCPU%d", cpu);
214 		ksec_entry.virt_addr = (uintptr_t)start;
215 		ksec_entry.phys_addr = per_cpu_ptr_to_phys(start);
216 		ksec_entry.size = static_size;
217 		if (rk_minidump_add_region(&ksec_entry) < 0)
218 			pr_err("Failed to add percpu sections in Minidump\n");
219 	}
220 }
221 #endif
222 
223 static inline bool in_stack_range(
224 		u64 sp, u64 base_addr, unsigned int stack_size)
225 {
226 	u64 min_addr = base_addr;
227 	u64 max_addr = base_addr + stack_size;
228 
229 	return (min_addr <= sp && sp < max_addr);
230 }
231 
232 static unsigned int calculate_copy_pages(u64 sp, struct vm_struct *stack_area)
233 {
234 	u64 tsk_stack_base = (u64) stack_area->addr;
235 	u64 offset;
236 	unsigned int stack_pages, copy_pages;
237 
238 	if (in_stack_range(sp, tsk_stack_base, get_vm_area_size(stack_area))) {
239 		offset = sp - tsk_stack_base;
240 		stack_pages = get_vm_area_size(stack_area) / PAGE_SIZE;
241 		copy_pages = stack_pages - (offset / PAGE_SIZE);
242 	} else {
243 		copy_pages = 0;
244 	}
245 	return copy_pages;
246 }
247 
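/*
 * One-shot registration of the calling CPU's current stack and task_struct.
 * Only used when CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK is disabled; the
 * dynamic variant below refreshes the per-cpu regions on context switch
 * instead.  With CONFIG_VMAP_STACK each remaining stack page is registered
 * individually, since the pages are not physically contiguous.
 */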
248 void dump_stack_minidump(u64 sp)
249 {
250 	struct md_region ksp_entry, ktsk_entry;
251 	u32 cpu = smp_processor_id();
252 	struct vm_struct *stack_vm_area;
253 	unsigned int i, copy_pages;
254 
255 	if (IS_ENABLED(CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK))
256 		return;
257 
258 	if (is_idle_task(current))
259 		return;
260 
261 	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);
262 
263 	if (sp < KIMAGE_VADDR || sp > -256UL)
264 		sp = current_stack_pointer;
265 
266 	/*
267 	 * Since stacks are now allocated with vmalloc, the translation to
268 	 * physical address is not a simple linear transformation like it is
269 	 * for kernel logical addresses, since vmalloc creates a virtual
270 	 * mapping. Thus, virt_to_phys() should not be used in this context;
271 	 * instead the page table must be walked to acquire the physical
272 	 * address of one page of the stack.
273 	 */
274 	stack_vm_area = task_stack_vm_area(current);
275 	if (is_vmap_stack) {
276 		sp &= ~(PAGE_SIZE - 1);
277 		copy_pages = calculate_copy_pages(sp, stack_vm_area);
278 		for (i = 0; i < copy_pages; i++) {
279 			scnprintf(ksp_entry.name, sizeof(ksp_entry.name),
280 				  "KSTACK%d_%d", cpu, i);
281 			(void)register_stack_entry(&ksp_entry, sp, PAGE_SIZE);
282 			sp += PAGE_SIZE;
283 		}
284 	} else {
285 		sp &= ~(THREAD_SIZE - 1);
286 		scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d",
287 			  cpu);
288 		(void)register_stack_entry(&ksp_entry, sp, THREAD_SIZE);
289 	}
290 
291 	scnprintf(ktsk_entry.name, sizeof(ktsk_entry.name), "KTASK%d", cpu);
292 	ktsk_entry.virt_addr = (u64)current;
293 	ktsk_entry.phys_addr = virt_to_phys((uintptr_t *)current);
294 	ktsk_entry.size = sizeof(struct task_struct);
295 	if (rk_minidump_add_region(&ktsk_entry) < 0)
296 		pr_err("Failed to add current task %d in Minidump\n", cpu);
297 }
298 
299 #ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
300 static void update_stack_entry(struct md_region *ksp_entry, u64 sp,
301 			       int mdno)
302 {
303 	struct page *sp_page;
304 
305 	ksp_entry->virt_addr = sp;
306 	if (likely(is_vmap_stack)) {
307 		sp_page = vmalloc_to_page((const void *) sp);
308 		ksp_entry->phys_addr = page_to_phys(sp_page);
309 	} else {
310 		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
311 	}
312 	if (rk_minidump_update_region(mdno, ksp_entry) < 0) {
313 		pr_err_ratelimited(
314 			"Failed to update stack entry %s in minidump\n",
315 			ksp_entry->name);
316 	}
317 }
318 
319 static void register_vmapped_stack(struct md_region *mdr, int *mdno,
320 				   u64 sp, char *name_str, bool update)
321 {
322 	int i;
323 
324 	sp &= ~(PAGE_SIZE - 1);
325 	for (i = 0; i < STACK_NUM_PAGES; i++) {
326 		if (unlikely(!update)) {
327 			scnprintf(mdr->name, sizeof(mdr->name), "%s_%d",
328 					  name_str, i);
329 			*mdno = register_stack_entry(mdr, sp, PAGE_SIZE);
330 		} else {
331 			update_stack_entry(mdr, sp, *mdno);
332 		}
333 		sp += PAGE_SIZE;
334 		mdr++;
335 		mdno++;
336 	}
337 }
338 
339 static void register_normal_stack(struct md_region *mdr, int *mdno,
340 				  u64 sp, char *name_str, bool update)
341 {
342 	sp &= ~(THREAD_SIZE - 1);
343 	if (unlikely(!update)) {
344 		scnprintf(mdr->name, sizeof(mdr->name), "%s", name_str);
345 		*mdno = register_stack_entry(mdr, sp, THREAD_SIZE);
346 	} else {
347 		update_stack_entry(mdr, sp, *mdno);
348 	}
349 }
350 
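/*
 * Re-point already-registered stack regions at a new stack base.  All
 * region indices must be valid (>= 0), i.e. the initial registration must
 * have succeeded, otherwise the update is skipped.
 */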
351 static void update_md_stack(struct md_region *stack_mdr,
352 			    int *stack_mdno, u64 sp)
353 {
354 	unsigned int i;
355 	int *mdno;
356 
357 	if (likely(is_vmap_stack)) {
358 		for (i = 0; i < STACK_NUM_PAGES; i++) {
359 			mdno = stack_mdno + i;
360 			if (unlikely(*mdno < 0))
361 				return;
362 		}
363 		register_vmapped_stack(stack_mdr, stack_mdno, sp, NULL, true);
364 	} else {
365 		if (unlikely(*stack_mdno < 0))
366 			return;
367 		register_normal_stack(stack_mdr, stack_mdno, sp, NULL, true);
368 	}
369 }
370 
371 static void update_md_cpu_stack(u32 cpu, u64 sp)
372 {
373 	struct md_stack_cpu_data *md_stack_cpu_d = &per_cpu(md_stack_data, cpu);
374 
375 	if (!md_current_stack_init)
376 		return;
377 
378 	update_md_stack(md_stack_cpu_d->stack_mdr,
379 			md_stack_cpu_d->stack_mdidx, sp);
380 }
381 
382 static void md_current_stack_notifer(void *ignore, bool preempt,
383 		struct task_struct *prev, struct task_struct *next)
384 {
385 	u32 cpu = task_cpu(next);
386 	u64 sp = (u64)next->stack;
387 
388 	update_md_cpu_stack(cpu, sp);
389 }
390 
391 static void md_current_stack_ipi_handler(void *data)
392 {
393 	u32 cpu = smp_processor_id();
394 	struct vm_struct *stack_vm_area;
395 	u64 sp = current_stack_pointer;
396 
397 	if (is_idle_task(current))
398 		return;
399 	if (likely(is_vmap_stack)) {
400 		stack_vm_area = task_stack_vm_area(current);
401 		sp = (u64)stack_vm_area->addr;
402 	}
403 	update_md_cpu_stack(cpu, sp);
404 }
405 
406 static void update_md_current_task(struct md_region *mdr, int mdno)
407 {
408 	mdr->virt_addr = (u64)current;
409 	mdr->phys_addr = virt_to_phys((uintptr_t *)current);
410 	if (rk_minidump_update_region(mdno, mdr) < 0)
411 		pr_err("Failed to update %s current task in minidump\n",
412 			   mdr->name);
413 }
414 
415 static void update_md_suspend_current_stack(void)
416 {
417 	u64 sp = current_stack_pointer;
418 	struct vm_struct *stack_vm_area;
419 
420 	if (likely(is_vmap_stack)) {
421 		stack_vm_area = task_stack_vm_area(current);
422 		sp = (u64)stack_vm_area->addr;
423 	}
424 	update_md_stack(md_suspend_context.stack_mdr,
425 			md_suspend_context.stack_mdidx, sp);
426 }
427 
428 static void update_md_suspend_current_task(void)
429 {
430 	if (unlikely(md_suspend_context.task_mdno < 0))
431 		return;
432 	update_md_current_task(&md_suspend_context.task_mdr,
433 			md_suspend_context.task_mdno);
434 }
435 
436 static void update_md_suspend_currents(void)
437 {
438 	if (!md_suspend_context.init)
439 		return;
440 	update_md_suspend_current_stack();
441 	update_md_suspend_current_task();
442 }
443 
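/*
 * Register per-cpu "KSTACK<cpu>" regions.  They are first filled with the
 * caller's own stack as placeholders; a sched_switch tracepoint probe plus
 * an IPI to all CPUs then make each CPU point its regions at the stack of
 * the task it is actually running.
 */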
444 static void register_current_stack(void)
445 {
446 	int cpu;
447 	u64 sp = current_stack_pointer;
448 	struct md_stack_cpu_data *md_stack_cpu_d;
449 	struct vm_struct *stack_vm_area;
450 	char name_str[MD_MAX_NAME_LENGTH];
451 
452 	/*
453 	 * Since stacks are now allocated with vmalloc, the translation to
454 	 * physical address is not a simple linear transformation like it is
455 	 * for kernel logical addresses, since vmalloc creates a virtual
456 	 * mapping. Thus, virt_to_phys() should not be used in this context;
457 	 * instead the page table must be walked to acquire the physical
458 	 * address of all pages of the stack.
459 	 */
460 	if (likely(is_vmap_stack)) {
461 		stack_vm_area = task_stack_vm_area(current);
462 		sp = (u64)stack_vm_area->addr;
463 	}
464 	for_each_possible_cpu(cpu) {
465 		/*
466 		 * Register placeholder entries for now; once the system is up
467 		 * and running, each CPU updates them with its own current task.
468 		 */
469 		md_stack_cpu_d = &per_cpu(md_stack_data, cpu);
470 		scnprintf(name_str, sizeof(name_str), "KSTACK%d", cpu);
471 		if (is_vmap_stack)
472 			register_vmapped_stack(md_stack_cpu_d->stack_mdr,
473 				md_stack_cpu_d->stack_mdidx, sp,
474 				name_str, false);
475 		else
476 			register_normal_stack(md_stack_cpu_d->stack_mdr,
477 				md_stack_cpu_d->stack_mdidx, sp,
478 				name_str, false);
479 	}
480 
481 	register_trace_sched_switch(md_current_stack_notifer, NULL);
482 	md_current_stack_init = 1;
483 	smp_call_function(md_current_stack_ipi_handler, NULL, 1);
484 }
485 
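/*
 * Suspend context: "KSUSPSTK" and "KSUSPTASK" regions tracking the task
 * that drives system suspend.  They are refreshed from the
 * PM_SUSPEND_PREPARE notifier so the dump reflects the stack and task that
 * were current when suspend was entered.
 */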
486 static void register_suspend_stack(void)
487 {
488 	char name_str[MD_MAX_NAME_LENGTH];
489 	u64 sp = current_stack_pointer;
490 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
491 
492 	scnprintf(name_str, sizeof(name_str), "KSUSPSTK");
493 	if (is_vmap_stack) {
494 		sp = (u64)stack_vm_area->addr;
495 		register_vmapped_stack(md_suspend_context.stack_mdr,
496 				md_suspend_context.stack_mdidx,
497 				sp, name_str, false);
498 	} else {
499 		register_normal_stack(md_suspend_context.stack_mdr,
500 			md_suspend_context.stack_mdidx,
501 			sp, name_str, false);
502 	}
503 }
504 
505 static void register_current_task(struct md_region *mdr, int *mdno,
506 				  char *name_str)
507 {
508 	scnprintf(mdr->name, sizeof(mdr->name), "%s", name_str);
509 	mdr->virt_addr = (u64)current;
510 	mdr->phys_addr = virt_to_phys((uintptr_t *)current);
511 	mdr->size = sizeof(struct task_struct);
512 	*mdno = rk_minidump_add_region(mdr);
513 	if (*mdno < 0)
514 		pr_err("Failed to add current task %s in Minidump\n",
515 		       mdr->name);
516 }
517 
518 static void register_suspend_current_task(void)
519 {
520 	char name_str[MD_MAX_NAME_LENGTH];
521 
522 	scnprintf(name_str, sizeof(name_str), "KSUSPTASK");
523 	register_current_task(&md_suspend_context.task_mdr,
524 			&md_suspend_context.task_mdno, name_str);
525 }
526 
527 #if !defined(MODULE) && defined(CONFIG_ARM64)
528 static void register_irq_stacks(void)
529 {
530 	struct md_region md_entry;
531 	int cpu, ret;
532 	struct page *sp_page;
533 
534 	for_each_possible_cpu(cpu) {
535 		scnprintf(md_entry.name, sizeof(md_entry.name), "KIRQSTACK%d", cpu);
536 		md_entry.virt_addr = (u64)per_cpu(irq_stack_ptr, cpu);
537 
538 		if (is_vmap_stack) {
539 			sp_page = vmalloc_to_page((const void *) md_entry.virt_addr);
540 			md_entry.phys_addr = page_to_phys(sp_page);
541 		} else {
542 			md_entry.phys_addr = virt_to_phys((const volatile void *)md_entry.virt_addr);
543 		}
544 
545 		md_entry.size = IRQ_STACK_SIZE;
546 		ret = rk_minidump_add_region(&md_entry);
547 		if (ret < 0)
548 			pr_err("Failed to add %s entry in Minidump\n", md_entry.name);
549 	}
550 }
551 #else
552 static inline void register_irq_stacks(void)
553 {
554 }
555 #endif
556 
557 static int minidump_pm_notifier(struct notifier_block *nb,
558 				unsigned long event, void *unused)
559 {
560 	switch (event) {
561 	case PM_SUSPEND_PREPARE:
562 		update_md_suspend_currents();
563 		break;
564 	}
565 	return NOTIFY_DONE;
566 }
567 
568 static struct notifier_block minidump_pm_nb = {
569 	.notifier_call = minidump_pm_notifier,
570 };
571 
572 static void register_suspend_context(void)
573 {
574 	register_suspend_stack();
575 	register_suspend_current_task();
576 	register_pm_notifier(&minidump_pm_nb);
577 	md_suspend_context.init = true;
578 }
579 #endif	/* CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK */
580 
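/*
 * Build the ELF note region: one NT_PRSTATUS slot per possible CPU inside
 * a single "note" region.  The per-cpu cpu_epr pointers are filled with
 * register state later, via rk_minidump_update_cpu_regs(), e.g. from the
 * die/panic paths below.
 */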
581 static Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
582 			  size_t data_len)
583 {
584 	struct elf_note *note = (struct elf_note *)buf;
585 
586 	note->n_namesz = strlen(name) + 1;
587 	note->n_descsz = data_len;
588 	note->n_type   = type;
589 	buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf_Word));
590 	memcpy(buf, name, note->n_namesz);
591 	buf += DIV_ROUND_UP(note->n_namesz, sizeof(Elf_Word));
592 	return buf;
593 }
594 
595 static void register_note_section(void)
596 {
597 	int ret = 0, i = 0;
598 	size_t data_len;
599 	Elf_Word *buf;
600 	void *buffer_start;
601 	struct elf_prstatus *epr;
602 	struct md_region *mdr = &note_md_entry;
603 
604 	buffer_start = kzalloc(PAGE_SIZE, GFP_KERNEL);
605 	if (!buffer_start)
606 		return;
607 
608 	memcpy(mdr->name, "note", 5);
609 	mdr->virt_addr = (uintptr_t)buffer_start;
610 	mdr->phys_addr = virt_to_phys(buffer_start);
611 
612 	buf = (Elf_Word *)mdr->virt_addr;
613 	data_len = sizeof(struct elf_prstatus);
614 	for_each_possible_cpu(i) {
615 		buf = append_elf_note(buf, "CORE", NT_PRSTATUS, data_len);
616 		epr = (struct elf_prstatus *)buf;
617 		epr->pr_pid = i;
618 		per_cpu(cpu_epr, i) = epr;
619 		buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
620 	}
621 
622 	mdr->size = (u64)buf - mdr->virt_addr;
623 	rk_md_flush_dcache_area((void *)mdr->virt_addr, mdr->size);
624 	ret = rk_minidump_add_region(mdr);
625 	if (ret < 0)
626 		pr_err("Failed to add %s entry in Minidump\n", mdr->name);
627 }
628 
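/*
 * Copy the exception register set into this CPU's NT_PRSTATUS note and
 * flush the note (and one page around the saved SP) from the data cache
 * so the dump contains up-to-date contents.
 */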
629 void rk_minidump_update_cpu_regs(struct pt_regs *regs)
630 {
631 	int cpu = raw_smp_processor_id();
632 	struct elf_prstatus *epr = per_cpu(cpu_epr, cpu);
633 
634 	if (!epr)
635 		return;
636 
637 	memcpy((void *)&epr->pr_reg, (void *)regs, sizeof(elf_gregset_t));
638 	rk_md_flush_dcache_area((void *)&epr->pr_reg, sizeof(elf_gregset_t));
639 	rk_md_flush_dcache_area((void *)(regs->sp & ~(PAGE_SIZE - 1)), PAGE_SIZE);
640 }
641 EXPORT_SYMBOL(rk_minidump_update_cpu_regs);
642 
643 #ifdef CONFIG_ROCKCHIP_MINIDUMP_FTRACE
644 static void minidump_add_trace_event(char *buf, size_t size)
645 {
646 	char *addr;
647 
648 	if (!READ_ONCE(md_ftrace_buf_addr) ||
649 	    (size > (size_t)MD_FTRACE_BUF_SIZE))
650 		return;
651 
652 	if ((md_ftrace_buf_current + size) > (size_t)MD_FTRACE_BUF_SIZE)
653 		md_ftrace_buf_current = 0;
654 	addr = md_ftrace_buf_addr + md_ftrace_buf_current;
655 	memcpy(addr, buf, size);
656 	md_ftrace_buf_current += size;
657 }
658 
659 static void md_trace_oops_enter(void *unused, bool *enter_check)
660 {
661 	if (!minidump_ftrace_in_oops) {
662 		minidump_ftrace_in_oops = true;
663 		*enter_check = false;
664 	} else {
665 		*enter_check = true;
666 	}
667 }
668 
669 static void md_trace_oops_exit(void *unused, bool *exit_check)
670 {
671 	minidump_ftrace_in_oops = false;
672 }
673 
674 static void md_update_trace_fmt(void *unused, bool *format_check)
675 {
676 	*format_check = false;
677 }
678 
679 static void md_buf_size_check(void *unused, unsigned long buffer_size,
680 			      bool *size_check)
681 {
682 	if (!minidump_ftrace_dump) {
683 		*size_check = true;
684 		return;
685 	}
686 
687 	if (buffer_size > (SZ_256K + PAGE_SIZE)) {
688 		pr_err("Skip md ftrace buffer dump for: %#lx\n", buffer_size);
689 		minidump_ftrace_dump = false;
690 		*size_check = true;
691 	}
692 }
693 
694 static void md_dump_trace_buf(void *unused, struct trace_seq *trace_buf,
695 			      bool *printk_check)
696 {
697 	if (minidump_ftrace_in_oops && minidump_ftrace_dump) {
698 		minidump_add_trace_event(trace_buf->buffer,
699 					 trace_buf->seq.len);
700 		*printk_check = false;
701 	}
702 }
703 
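/*
 * Allocate the 2MB "KFTRACE" region and attach the Android vendor ftrace
 * dump hooks so that, on an oops, trace output is copied into this buffer
 * (wrapping when full) rather than printed to the console.
 */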
704 static void md_register_trace_buf(void)
705 {
706 	struct md_region md_entry;
707 	void *buffer_start;
708 
709 	buffer_start = kzalloc(MD_FTRACE_BUF_SIZE, GFP_KERNEL);
710 
711 	if (!buffer_start)
712 		return;
713 
714 	strscpy(md_entry.name, "KFTRACE", sizeof(md_entry.name));
715 	md_entry.virt_addr = (uintptr_t)buffer_start;
716 	md_entry.phys_addr = virt_to_phys(buffer_start);
717 	md_entry.size = MD_FTRACE_BUF_SIZE;
718 	if (rk_minidump_add_region(&md_entry) < 0)
719 		pr_err("Failed to add ftrace buffer entry in Minidump\n");
720 
721 	register_trace_android_vh_ftrace_oops_enter(md_trace_oops_enter,
722 							 NULL);
723 	register_trace_android_vh_ftrace_oops_exit(md_trace_oops_exit,
724 							 NULL);
725 	register_trace_android_vh_ftrace_size_check(md_buf_size_check,
726 						    NULL);
727 	register_trace_android_vh_ftrace_format_check(md_update_trace_fmt,
728 						      NULL);
729 	register_trace_android_vh_ftrace_dump_buffer(md_dump_trace_buf,
730 						     NULL);
731 
732 	/* Complete registration before adding entries */
733 	smp_mb();
734 	WRITE_ONCE(md_ftrace_buf_addr, buffer_start);
735 }
736 #endif
737 
738 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
739 static void md_dump_align(void)
740 {
741 	int tab_offset = md_align_offset;
742 
743 	while (tab_offset--)
744 		seq_buf_printf(md_runq_seq_buf, " | ");
745 	seq_buf_printf(md_runq_seq_buf, " |--");
746 }
747 
748 static void md_dump_task_info(struct task_struct *task, char *status,
749 			      struct task_struct *curr)
750 {
751 	struct sched_entity *se;
752 
753 	md_dump_align();
754 	if (!task) {
755 		seq_buf_printf(md_runq_seq_buf, "%s : None(0)\n", status);
756 		return;
757 	}
758 
759 	se = &task->se;
760 	if (task == curr) {
761 		seq_buf_printf(md_runq_seq_buf,
762 			       "[status: curr] pid: %d comm: %s preempt: %#llx\n",
763 			       task_pid_nr(task), task->comm,
764 			       (u64)task->thread_info.preempt_count);
765 		return;
766 	}
767 
768 	seq_buf_printf(md_runq_seq_buf,
769 		       "[status: %s] pid: %d tsk: %#lx comm: %s stack: %#lx",
770 		       status, task_pid_nr(task),
771 		       (unsigned long)task,
772 		       task->comm,
773 		       (unsigned long)task->stack);
774 	seq_buf_printf(md_runq_seq_buf,
775 		       " prio: %d aff: %*pb",
776 		       task->prio, cpumask_pr_args(&task->cpus_mask));
777 #ifdef CONFIG_SCHED_WALT
778 	seq_buf_printf(md_runq_seq_buf, " enq: %lu wake: %lu sleep: %lu",
779 		       task->wts.last_enqueued_ts, task->wts.last_wake_ts,
780 		       task->wts.last_sleep_ts);
781 #endif
782 	seq_buf_printf(md_runq_seq_buf,
783 		       " vrun: %lu arr: %lu sum_ex: %lu\n",
784 		       (unsigned long)se->vruntime,
785 		       (unsigned long)se->exec_start,
786 		       (unsigned long)se->sum_exec_runtime);
787 }
788 
789 static void md_dump_cfs_rq(struct cfs_rq *cfs, struct task_struct *curr);
790 
791 static void md_dump_cgroup_state(char *status, struct sched_entity *se_p,
792 				 struct task_struct *curr)
793 {
794 	struct task_struct *task;
795 	struct cfs_rq *my_q = NULL;
796 	unsigned int nr_running;
797 
798 	if (!se_p) {
799 		md_dump_task_info(NULL, status, NULL);
800 		return;
801 	}
802 #ifdef CONFIG_FAIR_GROUP_SCHED
803 	my_q = se_p->my_q;
804 #endif
805 	if (!my_q) {
806 		task = container_of(se_p, struct task_struct, se);
807 		md_dump_task_info(task, status, curr);
808 		return;
809 	}
810 	nr_running = my_q->nr_running;
811 	md_dump_align();
812 	seq_buf_printf(md_runq_seq_buf, "%s: %d process is grouping\n",
813 				   status, nr_running);
814 	md_align_offset++;
815 	md_dump_cfs_rq(my_q, curr);
816 	md_align_offset--;
817 }
818 
819 static void md_dump_cfs_node_func(struct rb_node *node,
820 				  struct task_struct *curr)
821 {
822 	struct sched_entity *se_p = container_of(node, struct sched_entity,
823 						 run_node);
824 
825 	md_dump_cgroup_state("pend", se_p, curr);
826 }
827 
828 static void md_rb_walk_cfs(struct rb_root_cached *rb_root_cached_p,
829 			   struct task_struct *curr)
830 {
831 	int max_walk = 200;	/* Bail out, in case of loop */
832 	struct rb_node *leftmost = rb_root_cached_p->rb_leftmost;
833 	struct rb_root *root = &rb_root_cached_p->rb_root;
834 	struct rb_node *rb_node = rb_first(root);
835 
836 	if (!leftmost)
837 		return;
838 	while (rb_node && max_walk--) {
839 		md_dump_cfs_node_func(rb_node, curr);
840 		rb_node = rb_next(rb_node);
841 	}
842 }
843 
844 static void md_dump_cfs_rq(struct cfs_rq *cfs, struct task_struct *curr)
845 {
846 	struct rb_root_cached *rb_root_cached_p = &cfs->tasks_timeline;
847 
848 	md_dump_cgroup_state("curr", cfs->curr, curr);
849 	md_dump_cgroup_state("next", cfs->next, curr);
850 	md_dump_cgroup_state("last", cfs->last, curr);
851 	md_dump_cgroup_state("skip", cfs->skip, curr);
852 	md_rb_walk_cfs(rb_root_cached_p, curr);
853 }
854 
855 static void md_dump_rt_rq(struct rt_rq *rt_rq, struct task_struct *curr)
856 {
857 	struct rt_prio_array *array = &rt_rq->active;
858 	struct sched_rt_entity *rt_se;
859 	int idx;
860 
861 	/* Lifted most of the below code from dump_throttled_rt_tasks() */
862 	if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
863 		return;
864 
865 	idx = sched_find_first_bit(array->bitmap);
866 	while (idx < MAX_RT_PRIO) {
867 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
868 			struct task_struct *p;
869 
870 #ifdef CONFIG_RT_GROUP_SCHED
871 			if (rt_se->my_q)
872 				continue;
873 #endif
874 
875 			p = container_of(rt_se, struct task_struct, rt);
876 			md_dump_task_info(p, "pend", curr);
877 		}
878 		idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
879 	}
880 }
881 
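/*
 * Walk every CPU's runqueue at panic time and render the running, CFS and
 * RT tasks (including nested task groups) into the "KRUNQUEUE" seq_buf;
 * md_align_offset controls the indentation used for group nesting.
 */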
882 static void md_dump_runqueues(void)
883 {
884 	int cpu;
885 	struct rq *rq;
886 	struct rt_rq  *rt;
887 	struct cfs_rq *cfs;
888 
889 	if (!md_runq_seq_buf)
890 		return;
891 
892 	for_each_possible_cpu(cpu) {
893 		rq = cpu_rq(cpu);
894 		rt = &rq->rt;
895 		cfs = &rq->cfs;
896 		seq_buf_printf(md_runq_seq_buf,
897 			       "CPU%d %d process is running\n",
898 			       cpu, rq->nr_running);
899 		md_dump_task_info(cpu_curr(cpu), "curr", NULL);
900 		seq_buf_printf(md_runq_seq_buf,
901 			       "CFS %d process is pending\n",
902 			       cfs->nr_running);
903 		md_dump_cfs_rq(cfs, cpu_curr(cpu));
904 		seq_buf_printf(md_runq_seq_buf,
905 			       "RT %d process is pending\n",
906 			       rt->rt_nr_running);
907 		md_dump_rt_rq(rt, cpu_curr(cpu));
908 		seq_buf_printf(md_runq_seq_buf, "\n");
909 	}
910 
911 	rk_md_flush_dcache_area((void *)md_runq_seq_buf->buffer, md_runq_seq_buf->len);
912 }
913 
914 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
915 /*
916  * dump a block of kernel memory from around the given address.
917  * Bulk of the code is lifted from arch/arm64/kernel/process.c.
918  */
919 static void md_dump_data(unsigned long addr, int nbytes, const char *name)
920 {
921 	int	i, j;
922 	int	nlines;
923 	u32	*p;
924 
925 	/*
926 	 * don't attempt to dump non-kernel addresses or
927 	 * values that are probably just small negative numbers
928 	 */
929 	if (addr < PAGE_OFFSET || addr > -256UL)
930 		return;
931 
932 	seq_buf_printf(md_cntxt_seq_buf, "\n%s: %#lx:\n", name, addr);
933 
934 	/*
935 	 * round address down to a 32 bit boundary
936 	 * and always dump a multiple of 32 bytes
937 	 */
938 	p = (u32 *)(addr & ~(sizeof(u32) - 1));
939 	nbytes += (addr & (sizeof(u32) - 1));
940 	nlines = (nbytes + 31) / 32;
941 
942 	for (i = 0; i < nlines; i++) {
943 		/*
944 		 * just display low 16 bits of address to keep
945 		 * each line of the dump < 80 characters
946 		 */
947 		seq_buf_printf(md_cntxt_seq_buf, "%04lx ",
948 			       (unsigned long)p & 0xffff);
949 		for (j = 0; j < 8; j++) {
950 			u32	data = 0;
951 
952 			if (get_kernel_nofault(data, p))
953 				seq_buf_printf(md_cntxt_seq_buf, " ********");
954 			else
955 				seq_buf_printf(md_cntxt_seq_buf, " %08x", data);
956 			++p;
957 		}
958 		seq_buf_printf(md_cntxt_seq_buf, "\n");
959 	}
960 }
961 
962 static void md_reg_context_data(struct pt_regs *regs)
963 {
964 	mm_segment_t fs;
965 	unsigned int i;
966 	int nbytes = 128;
967 
968 	if (user_mode(regs) ||  !regs->pc)
969 		return;
970 
971 	rk_minidump_update_cpu_regs(regs);
972 	fs = get_fs();
973 	set_fs(KERNEL_DS);
974 	md_dump_data(regs->pc - nbytes, nbytes * 2, "PC");
975 	md_dump_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
976 	md_dump_data(regs->sp - nbytes, nbytes * 2, "SP");
977 	for (i = 0; i < 30; i++) {
978 		char name[4];
979 
980 		snprintf(name, sizeof(name), "X%u", i);
981 		md_dump_data(regs->regs[i] - nbytes, nbytes * 2, name);
982 	}
983 	set_fs(fs);
984 	rk_md_flush_dcache_area((void *)md_cntxt_seq_buf->buffer, md_cntxt_seq_buf->len);
985 }
986 
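/*
 * Capture the panicking CPU's registers with inline assembly (the layout
 * mirrors crash_setup_regs()) and feed them through md_reg_context_data(),
 * so the panic CPU gets a context dump even when no exception occurred.
 */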
987 static inline void md_dump_panic_regs(void)
988 {
989 	struct pt_regs regs;
990 	u64 tmp1, tmp2;
991 
992 	/* Lifted from crash_setup_regs() */
993 	__asm__ __volatile__ (
994 		"stp	 x0,   x1, [%2, #16 *  0]\n"
995 		"stp	 x2,   x3, [%2, #16 *  1]\n"
996 		"stp	 x4,   x5, [%2, #16 *  2]\n"
997 		"stp	 x6,   x7, [%2, #16 *  3]\n"
998 		"stp	 x8,   x9, [%2, #16 *  4]\n"
999 		"stp	x10,  x11, [%2, #16 *  5]\n"
1000 		"stp	x12,  x13, [%2, #16 *  6]\n"
1001 		"stp	x14,  x15, [%2, #16 *  7]\n"
1002 		"stp	x16,  x17, [%2, #16 *  8]\n"
1003 		"stp	x18,  x19, [%2, #16 *  9]\n"
1004 		"stp	x20,  x21, [%2, #16 * 10]\n"
1005 		"stp	x22,  x23, [%2, #16 * 11]\n"
1006 		"stp	x24,  x25, [%2, #16 * 12]\n"
1007 		"stp	x26,  x27, [%2, #16 * 13]\n"
1008 		"stp	x28,  x29, [%2, #16 * 14]\n"
1009 		"mov	 %0,  sp\n"
1010 		"stp	x30,  %0,  [%2, #16 * 15]\n"
1011 
1012 		"/* faked current PSTATE */\n"
1013 		"mrs	 %0, CurrentEL\n"
1014 		"mrs	 %1, SPSEL\n"
1015 		"orr	 %0, %0, %1\n"
1016 		"mrs	 %1, DAIF\n"
1017 		"orr	 %0, %0, %1\n"
1018 		"mrs	 %1, NZCV\n"
1019 		"orr	 %0, %0, %1\n"
1020 		/* pc */
1021 		"adr	 %1, 1f\n"
1022 		"1:\n"
1023 		"stp	 %1, %0,   [%2, #16 * 16]\n"
1024 		: "=&r" (tmp1), "=&r" (tmp2)
1025 		: "r" (&regs)
1026 		: "memory"
1027 		);
1028 
1029 	seq_buf_printf(md_cntxt_seq_buf, "PANIC CPU : %d\n",
1030 				   raw_smp_processor_id());
1031 	md_reg_context_data(&regs);
1032 }
1033 
1034 static int md_die_context_notify(struct notifier_block *self,
1035 				 unsigned long val, void *data)
1036 {
1037 	struct die_args *args = (struct die_args *)data;
1038 
1039 	if (md_in_oops_handler)
1040 		return NOTIFY_DONE;
1041 	md_in_oops_handler = true;
1042 	if (!md_cntxt_seq_buf) {
1043 		md_in_oops_handler = false;
1044 		return NOTIFY_DONE;
1045 	}
1046 	die_cpu = raw_smp_processor_id();
1047 	seq_buf_printf(md_cntxt_seq_buf, "\nDIE CPU : %d\n", die_cpu);
1048 	md_reg_context_data(args->regs);
1049 	md_in_oops_handler = false;
1050 	return NOTIFY_DONE;
1051 }
1052 
1053 static struct notifier_block md_die_context_nb = {
1054 	.notifier_call = md_die_context_notify,
1055 	.priority = INT_MAX - 2, /* < rk watchdog die notifier */
1056 };
1057 #endif
1058 
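/*
 * Panic notifier: collect the remaining debug data (CPU context if the
 * panic CPU has not already been through the die notifier, runqueues,
 * meminfo, slab/page-owner and dma-buf information) and flush the ELF
 * header so all registered regions are consistent in the dump.
 */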
1059 static int md_panic_handler(struct notifier_block *this,
1060 			    unsigned long event, void *ptr)
1061 {
1062 	if (md_in_oops_handler)
1063 		return NOTIFY_DONE;
1064 	md_in_oops_handler = true;
1065 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
1066 	if (!md_cntxt_seq_buf)
1067 		goto dump_rq;
1068 	if (raw_smp_processor_id() != die_cpu)
1069 		md_dump_panic_regs();
1070 dump_rq:
1071 #endif
1072 	md_dump_runqueues();
1073 	if (md_meminfo_seq_buf)
1074 		md_dump_meminfo(md_meminfo_seq_buf);
1075 
1076 #ifdef CONFIG_SLUB_DEBUG
1077 	if (md_slabinfo_seq_buf)
1078 		md_dump_slabinfo(md_slabinfo_seq_buf);
1079 #endif
1080 
1081 #ifdef CONFIG_PAGE_OWNER
1082 	if (md_pageowner_dump_addr)
1083 		md_dump_pageowner(md_pageowner_dump_addr, md_pageowner_dump_size);
1084 #endif
1085 
1086 #ifdef CONFIG_SLUB_DEBUG
1087 	if (md_slabowner_dump_addr)
1088 		md_dump_slabowner(md_slabowner_dump_addr, md_slabowner_dump_size);
1089 #endif
1090 	if (md_dma_buf_info_addr)
1091 		md_dma_buf_info(md_dma_buf_info_addr, md_dma_buf_info_size);
1092 
1093 	if (md_dma_buf_procs_addr)
1094 		md_dma_buf_procs(md_dma_buf_procs_addr, md_dma_buf_procs_size);
1095 
1096 	rk_minidump_flush_elfheader();
1097 	md_in_oops_handler = false;
1098 	return NOTIFY_DONE;
1099 }
1100 
1101 static struct notifier_block md_panic_blk = {
1102 	.notifier_call = md_panic_handler,
1103 	.priority = INT_MAX - 2,
1104 };
1105 
1106 static int md_register_minidump_entry(char *name, u64 virt_addr,
1107 				      u64 phys_addr, u64 size)
1108 {
1109 	struct md_region md_entry;
1110 	int ret;
1111 
1112 	strscpy(md_entry.name, name, sizeof(md_entry.name));
1113 	md_entry.virt_addr = virt_addr;
1114 	md_entry.phys_addr = phys_addr;
1115 	md_entry.size = size;
1116 	ret = rk_minidump_add_region(&md_entry);
1117 	if (ret < 0)
1118 		pr_err("Failed to add %s entry in Minidump\n", name);
1119 	return ret;
1120 }
1121 
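/*
 * Allocate num_pages pages, register them as a minidump region called
 * @name and publish the backing seq_buf through @global_buf.  The
 * WRITE_ONCE() after the barrier ensures consumers only see the seq_buf
 * once the region registration is complete.
 */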
1122 static int md_register_panic_entries(int num_pages, char *name,
1123 				      struct seq_buf **global_buf)
1124 {
1125 	char *buf;
1126 	struct seq_buf *seq_buf_p;
1127 	int ret;
1128 
1129 	buf = kzalloc(num_pages * PAGE_SIZE, GFP_KERNEL);
1130 	if (!buf)
1131 		return -EINVAL;
1132 
1133 	seq_buf_p = kzalloc(sizeof(*seq_buf_p), GFP_KERNEL);
1134 	if (!seq_buf_p) {
1135 		ret = -EINVAL;
1136 		goto err_seq_buf;
1137 	}
1138 
1139 	ret = md_register_minidump_entry(name, (uintptr_t)buf,
1140 					 virt_to_phys(buf),
1141 					 num_pages * PAGE_SIZE);
1142 	if (ret < 0)
1143 		goto err_entry_reg;
1144 
1145 	seq_buf_init(seq_buf_p, buf, num_pages * PAGE_SIZE);
1146 
1147 	/* Complete registration before populating data */
1148 	smp_mb();
1149 	WRITE_ONCE(*global_buf, seq_buf_p);
1150 	return 0;
1151 
1152 err_entry_reg:
1153 	kfree(seq_buf_p);
1154 err_seq_buf:
1155 	kfree(buf);
1156 	return ret;
1157 }
1158 
1159 static void md_register_panic_data(void)
1160 {
1161 	struct dentry *minidump_dir = NULL;
1162 
1163 	md_register_panic_entries(MD_RUNQUEUE_PAGES, "KRUNQUEUE",
1164 				  &md_runq_seq_buf);
1165 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
1166 	md_register_panic_entries(MD_CPU_CNTXT_PAGES, "KCNTXT",
1167 				  &md_cntxt_seq_buf);
1168 #endif
1169 	md_register_panic_entries(MD_MEMINFO_PAGES, "MEMINFO",
1170 				  &md_meminfo_seq_buf);
1171 #ifdef CONFIG_SLUB_DEBUG
1172 	md_register_panic_entries(MD_SLABINFO_PAGES, "SLABINFO",
1173 				  &md_slabinfo_seq_buf);
1174 #endif
1175 	if (!minidump_dir)
1176 		minidump_dir = debugfs_create_dir("minidump", NULL);
1177 #ifdef CONFIG_PAGE_OWNER
1178 	if (is_page_owner_enabled()) {
1179 		md_register_memory_dump(md_pageowner_dump_size, "PAGEOWNER");
1180 		md_debugfs_pageowner(minidump_dir);
1181 	}
1182 #endif
1183 #ifdef CONFIG_SLUB_DEBUG
1184 	if (is_slub_debug_enabled()) {
1185 		md_register_memory_dump(md_slabowner_dump_size, "SLABOWNER");
1186 		md_debugfs_slabowner(minidump_dir);
1187 	}
1188 #endif
1189 	md_register_memory_dump(md_dma_buf_info_size, "DMABUF_INFO");
1190 	md_debugfs_dmabufinfo(minidump_dir);
1191 	md_register_memory_dump(md_dma_buf_procs_size, "DMABUF_PROCS");
1192 	md_debugfs_dmabufprocs(minidump_dir);
1193 }
1194 
1195 static int print_module(const char *name, void *mod_addr, void *data)
1196 {
1197 	if (!md_mod_info_seq_buf) {
1198 		pr_err("md_mod_info_seq_buf is NULL\n");
1199 		return -EINVAL;
1200 	}
1201 
1202 	seq_buf_printf(md_mod_info_seq_buf, "name: %s, base: %#lx\n", name, (uintptr_t)mod_addr);
1203 	return 0;
1204 }
1205 
1206 static int md_module_notify(struct notifier_block *self,
1207 			    unsigned long val, void *data)
1208 {
1209 	struct module *mod = data;
1210 
1211 	spin_lock(&md_modules_lock);
1212 	switch (mod->state) {
1213 	case MODULE_STATE_LIVE:
1214 		print_module(mod->name, mod->core_layout.base, data);
1215 		break;
1216 	case MODULE_STATE_GOING:
1217 		print_module(mod->name, mod->core_layout.base, data);
1218 		break;
1219 	default:
1220 		break;
1221 	}
1222 	spin_unlock(&md_modules_lock);
1223 	return 0;
1224 }
1225 
1226 static struct notifier_block md_module_nb = {
1227 	.notifier_call = md_module_notify,
1228 };
1229 
1230 static void md_register_module_data(void)
1231 {
1232 	int ret;
1233 
1234 	ret = md_register_panic_entries(MD_MODULE_PAGES, "KMODULES",
1235 					&md_mod_info_seq_buf);
1236 	if (ret) {
1237 		pr_err("Failed to register minidump module buffer\n");
1238 		return;
1239 	}
1240 
1241 	seq_buf_printf(md_mod_info_seq_buf, "=== MODULE INFO ===\n");
1242 	ret = register_module_notifier(&md_module_nb);
1243 	if (ret) {
1244 		pr_err("Failed to register minidump module notifier\n");
1245 		return;
1246 	}
1247 
1248 	android_debug_for_each_module(print_module, NULL);
1249 }
1250 #endif /* CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP */
1251 
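/*
 * Top-level init: register the ELF note section, kernel sections, the
 * per-cpu/suspend/IRQ stacks, the ftrace buffer and the panic-time dump
 * buffers, then hook the panic and die notifier chains.  Each block is
 * only compiled in when its corresponding CONFIG_ option is enabled.
 */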
1252 int rk_minidump_log_init(void)
1253 {
1254 	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);
1255 
1256 	register_note_section();
1257 #ifdef CONFIG_ANDROID_DEBUG_SYMBOLS
1258 	register_kernel_sections();
1259 #endif
1260 
1261 #ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
1262 	register_current_stack();
1263 	register_suspend_context();
1264 	register_irq_stacks();
1265 #endif
1266 
1267 #ifdef CONFIG_ROCKCHIP_MINIDUMP_FTRACE
1268 	md_register_trace_buf();
1269 #endif
1270 
1271 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
1272 	md_register_module_data();
1273 	md_register_panic_data();
1274 	atomic_notifier_chain_register(&panic_notifier_list, &md_panic_blk);
1275 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
1276 	register_die_notifier(&md_die_context_nb);
1277 #endif
1278 #endif
1279 	return 0;
1280 }
1281