// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Rockchip Electronics Co., Ltd.
 */

#include <linux/cache.h>
#include <linux/freezer.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <soc/rockchip/rk_minidump.h>
#include <asm/page.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <linux/mm.h>
#include <linux/ratelimit.h>
#include <linux/notifier.h>
#include <linux/sizes.h>
#include <linux/sched/task.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>
#include <linux/android_debug_symbols.h>
#include <linux/elf.h>
#include <linux/seq_buf.h>
#include <linux/elfcore.h>
#include "minidump_private.h"

#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
#include <linux/bits.h>
#include <linux/sched/prio.h>

#include "../../../kernel/sched/sched.h"

#include <linux/kdebug.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/dma-map-ops.h>
#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
#include <trace/hooks/debug.h>
#endif
#include "minidump_memory.h"
#endif /* CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP */

#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK

#include <trace/events/sched.h>

#ifdef CONFIG_VMAP_STACK
#define STACK_NUM_PAGES (THREAD_SIZE / PAGE_SIZE)
#else
#define STACK_NUM_PAGES 1
#endif /* !CONFIG_VMAP_STACK */

struct md_stack_cpu_data {
	int stack_mdidx[STACK_NUM_PAGES];
	struct md_region stack_mdr[STACK_NUM_PAGES];
} ____cacheline_aligned_in_smp;

static int md_current_stack_init __read_mostly;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct md_stack_cpu_data, md_stack_data);

struct md_suspend_context_data {
	int task_mdno;
	int stack_mdidx[STACK_NUM_PAGES];
	struct md_region stack_mdr[STACK_NUM_PAGES];
	struct md_region task_mdr;
	bool init;
};

static struct md_suspend_context_data md_suspend_context;
#endif /* CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK */

static bool is_vmap_stack __read_mostly;

#ifdef CONFIG_ROCKCHIP_MINIDUMP_FTRACE
#include <trace/hooks/ftrace_dump.h>
#include <linux/ring_buffer.h>

#define MD_FTRACE_BUF_SIZE SZ_2M

static char *md_ftrace_buf_addr;
static size_t md_ftrace_buf_current;
static bool minidump_ftrace_in_oops;
static bool minidump_ftrace_dump = true;
#endif

#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
/* Runqueue information */
#define MD_RUNQUEUE_PAGES 8

static bool md_in_oops_handler;
static struct seq_buf *md_runq_seq_buf;
static int md_align_offset;

/* CPU context information */
#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
#define MD_CPU_CNTXT_PAGES 32

static int die_cpu = -1;
static struct seq_buf *md_cntxt_seq_buf;
#endif

/* Meminfo */
static struct seq_buf *md_meminfo_seq_buf;

/* Slabinfo */
#ifdef CONFIG_SLUB_DEBUG
static struct seq_buf *md_slabinfo_seq_buf;
#endif

#ifdef CONFIG_PAGE_OWNER
size_t md_pageowner_dump_size = SZ_2M;
char *md_pageowner_dump_addr;
#endif

#ifdef CONFIG_SLUB_DEBUG
size_t md_slabowner_dump_size = SZ_2M;
char *md_slabowner_dump_addr;
#endif

size_t md_dma_buf_info_size = SZ_256K;
char *md_dma_buf_info_addr;

size_t md_dma_buf_procs_size = SZ_256K;
char *md_dma_buf_procs_addr;

/* Modules information */
#ifdef CONFIG_MODULES
#define MD_MODULE_PAGES 8
static struct seq_buf *md_mod_info_seq_buf;
static DEFINE_SPINLOCK(md_modules_lock);
#endif /* CONFIG_MODULES */
#endif

static struct md_region note_md_entry;
static DEFINE_PER_CPU_SHARED_ALIGNED(struct elf_prstatus *, cpu_epr);

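/*
 * register_stack_entry() - describe one kernel stack region to the minidump.
 *
 * Fills @ksp_entry with the virtual/physical address pair for the stack at
 * @sp and registers it. For VMAP stacks the physical address has to be
 * looked up via vmalloc_to_page(), since the stack is not linearly mapped.
 */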
static int register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size)
{
	struct page *sp_page;
	int entry;

	ksp_entry->virt_addr = sp;
	ksp_entry->size = size;
	if (is_vmap_stack) {
		sp_page = vmalloc_to_page((const void *)sp);
		ksp_entry->phys_addr = page_to_phys(sp_page);
	} else {
		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
	}

	entry = rk_minidump_add_region(ksp_entry);
	if (entry < 0)
		pr_err("Failed to add stack of entry %s in Minidump\n",
		       ksp_entry->name);
	return entry;
}

#ifdef CONFIG_ANDROID_DEBUG_SYMBOLS
static void register_kernel_sections(void)
{
	struct md_region ksec_entry;
	char *data_name = "KDATABSS";
	char *rodata_name = "KROAIDATA";
	size_t static_size;
	void __percpu *base;
	unsigned int cpu;
	void *_sdata, *__bss_stop;
	void *start_ro, *end_ro;

	_sdata = android_debug_symbol(ADS_SDATA);
	__bss_stop = android_debug_symbol(ADS_BSS_END);
	base = android_debug_symbol(ADS_PER_CPU_START);
	static_size = (size_t)(android_debug_symbol(ADS_PER_CPU_END) - base);

	strscpy(ksec_entry.name, data_name, sizeof(ksec_entry.name));
	ksec_entry.virt_addr = (u64)_sdata;
	ksec_entry.phys_addr = virt_to_phys(_sdata);
	ksec_entry.size = roundup((__bss_stop - _sdata), 4);
	if (rk_minidump_add_region(&ksec_entry) < 0)
		pr_err("Failed to add data section in Minidump\n");

	start_ro = android_debug_symbol(ADS_START_RO_AFTER_INIT);
	end_ro = android_debug_symbol(ADS_END_RO_AFTER_INIT);
	strscpy(ksec_entry.name, rodata_name, sizeof(ksec_entry.name));
	ksec_entry.virt_addr = (uintptr_t)start_ro;
	ksec_entry.phys_addr = virt_to_phys(start_ro);
	ksec_entry.size = roundup((end_ro - start_ro), 4);
	if (rk_minidump_add_region(&ksec_entry) < 0)
		pr_err("Failed to add rodata section in Minidump\n");

	/* Add percpu static sections */
	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		memset(&ksec_entry, 0, sizeof(ksec_entry));
		scnprintf(ksec_entry.name, sizeof(ksec_entry.name),
			  "KSPERCPU%d", cpu);
		ksec_entry.virt_addr = (uintptr_t)start;
		ksec_entry.phys_addr = per_cpu_ptr_to_phys(start);
		ksec_entry.size = static_size;
		if (rk_minidump_add_region(&ksec_entry) < 0)
			pr_err("Failed to add percpu sections in Minidump\n");
	}
}
#endif

static inline bool in_stack_range(
		u64 sp, u64 base_addr, unsigned int stack_size)
{
	u64 min_addr = base_addr;
	u64 max_addr = base_addr + stack_size;

	return (min_addr <= sp && sp < max_addr);
}

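/*
 * calculate_copy_pages() - number of stack pages worth saving.
 *
 * Only the pages from the one containing @sp up to the top of the stack
 * area hold live data. Illustration (assuming a 16 KiB VMAP stack made of
 * four 4 KiB pages): with @sp in the third page, offset / PAGE_SIZE is 2,
 * so 4 - 2 = 2 pages are copied.
 */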
static unsigned int calculate_copy_pages(u64 sp, struct vm_struct *stack_area)
{
	u64 tsk_stack_base = (u64)stack_area->addr;
	u64 offset;
	unsigned int stack_pages, copy_pages;

	if (in_stack_range(sp, tsk_stack_base, get_vm_area_size(stack_area))) {
		offset = sp - tsk_stack_base;
		stack_pages = get_vm_area_size(stack_area) / PAGE_SIZE;
		copy_pages = stack_pages - (offset / PAGE_SIZE);
	} else {
		copy_pages = 0;
	}
	return copy_pages;
}

void dump_stack_minidump(u64 sp)
{
	struct md_region ksp_entry, ktsk_entry;
	u32 cpu = smp_processor_id();
	struct vm_struct *stack_vm_area;
	unsigned int i, copy_pages;

	if (IS_ENABLED(CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK))
		return;

	if (is_idle_task(current))
		return;

	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);

	if (sp < KIMAGE_VADDR || sp > -256UL)
		sp = current_stack_pointer;

	/*
	 * Since stacks are now allocated with vmalloc, the translation to
	 * physical address is not a simple linear transformation like it is
	 * for kernel logical addresses, since vmalloc creates a virtual
	 * mapping. Thus, virt_to_phys() should not be used in this context;
	 * instead the page table must be walked to acquire the physical
	 * address of one page of the stack.
	 */
	stack_vm_area = task_stack_vm_area(current);
	if (is_vmap_stack) {
		sp &= ~(PAGE_SIZE - 1);
		copy_pages = calculate_copy_pages(sp, stack_vm_area);
		for (i = 0; i < copy_pages; i++) {
			scnprintf(ksp_entry.name, sizeof(ksp_entry.name),
				  "KSTACK%d_%d", cpu, i);
			(void)register_stack_entry(&ksp_entry, sp, PAGE_SIZE);
			sp += PAGE_SIZE;
		}
	} else {
		sp &= ~(THREAD_SIZE - 1);
		scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d",
			  cpu);
		(void)register_stack_entry(&ksp_entry, sp, THREAD_SIZE);
	}

	scnprintf(ktsk_entry.name, sizeof(ktsk_entry.name), "KTASK%d", cpu);
	ktsk_entry.virt_addr = (u64)current;
	ktsk_entry.phys_addr = virt_to_phys((uintptr_t *)current);
	ktsk_entry.size = sizeof(struct task_struct);
	if (rk_minidump_add_region(&ktsk_entry) < 0)
		pr_err("Failed to add current task %d in Minidump\n", cpu);
}

#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
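/*
 * With CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK the per-CPU stack regions are
 * registered once with placeholder contents and then re-pointed at every
 * context switch (via the sched_switch tracepoint), so the minidump always
 * references the stack of the task currently running on each CPU.
 */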
static void update_stack_entry(struct md_region *ksp_entry, u64 sp,
			       int mdno)
{
	struct page *sp_page;

	ksp_entry->virt_addr = sp;
	if (likely(is_vmap_stack)) {
		sp_page = vmalloc_to_page((const void *)sp);
		ksp_entry->phys_addr = page_to_phys(sp_page);
	} else {
		ksp_entry->phys_addr = virt_to_phys((uintptr_t *)sp);
	}
	if (rk_minidump_update_region(mdno, ksp_entry) < 0) {
		pr_err_ratelimited(
			"Failed to update stack entry %s in minidump\n",
			ksp_entry->name);
	}
}

static void register_vmapped_stack(struct md_region *mdr, int *mdno,
				   u64 sp, char *name_str, bool update)
{
	int i;

	sp &= ~(PAGE_SIZE - 1);
	for (i = 0; i < STACK_NUM_PAGES; i++) {
		if (unlikely(!update)) {
			scnprintf(mdr->name, sizeof(mdr->name), "%s_%d",
				  name_str, i);
			*mdno = register_stack_entry(mdr, sp, PAGE_SIZE);
		} else {
			update_stack_entry(mdr, sp, *mdno);
		}
		sp += PAGE_SIZE;
		mdr++;
		mdno++;
	}
}

static void register_normal_stack(struct md_region *mdr, int *mdno,
				  u64 sp, char *name_str, bool update)
{
	sp &= ~(THREAD_SIZE - 1);
	if (unlikely(!update)) {
		/* Use "%s" so a '%' in name_str cannot act as a format */
		scnprintf(mdr->name, sizeof(mdr->name), "%s", name_str);
		*mdno = register_stack_entry(mdr, sp, THREAD_SIZE);
	} else {
		update_stack_entry(mdr, sp, *mdno);
	}
}

static void update_md_stack(struct md_region *stack_mdr,
			    int *stack_mdno, u64 sp)
{
	unsigned int i;
	int *mdno;

	if (likely(is_vmap_stack)) {
		for (i = 0; i < STACK_NUM_PAGES; i++) {
			mdno = stack_mdno + i;
			if (unlikely(*mdno < 0))
				return;
		}
		register_vmapped_stack(stack_mdr, stack_mdno, sp, NULL, true);
	} else {
		if (unlikely(*stack_mdno < 0))
			return;
		register_normal_stack(stack_mdr, stack_mdno, sp, NULL, true);
	}
}

static void update_md_cpu_stack(u32 cpu, u64 sp)
{
	struct md_stack_cpu_data *md_stack_cpu_d = &per_cpu(md_stack_data, cpu);

	if (!md_current_stack_init)
		return;

	update_md_stack(md_stack_cpu_d->stack_mdr,
			md_stack_cpu_d->stack_mdidx, sp);
}

static void md_current_stack_notifier(void *ignore, bool preempt,
				      struct task_struct *prev,
				      struct task_struct *next)
{
	u32 cpu = task_cpu(next);
	u64 sp = (u64)next->stack;

	update_md_cpu_stack(cpu, sp);
}

static void md_current_stack_ipi_handler(void *data)
{
	u32 cpu = smp_processor_id();
	struct vm_struct *stack_vm_area;
	u64 sp = current_stack_pointer;

	if (is_idle_task(current))
		return;
	if (likely(is_vmap_stack)) {
		stack_vm_area = task_stack_vm_area(current);
		sp = (u64)stack_vm_area->addr;
	}
	update_md_cpu_stack(cpu, sp);
}

static void update_md_current_task(struct md_region *mdr, int mdno)
{
	mdr->virt_addr = (u64)current;
	mdr->phys_addr = virt_to_phys((uintptr_t *)current);
	if (rk_minidump_update_region(mdno, mdr) < 0)
		pr_err("Failed to update %s current task in minidump\n",
		       mdr->name);
}

static void update_md_suspend_current_stack(void)
{
	u64 sp = current_stack_pointer;
	struct vm_struct *stack_vm_area;

	if (likely(is_vmap_stack)) {
		stack_vm_area = task_stack_vm_area(current);
		sp = (u64)stack_vm_area->addr;
	}
	update_md_stack(md_suspend_context.stack_mdr,
			md_suspend_context.stack_mdidx, sp);
}

static void update_md_suspend_current_task(void)
{
	if (unlikely(md_suspend_context.task_mdno < 0))
		return;
	update_md_current_task(&md_suspend_context.task_mdr,
			       md_suspend_context.task_mdno);
}

static void update_md_suspend_currents(void)
{
	if (!md_suspend_context.init)
		return;
	update_md_suspend_current_stack();
	update_md_suspend_current_task();
}

static void register_current_stack(void)
{
	int cpu;
	u64 sp = current_stack_pointer;
	struct md_stack_cpu_data *md_stack_cpu_d;
	struct vm_struct *stack_vm_area;
	char name_str[MD_MAX_NAME_LENGTH];

	/*
	 * Since stacks are now allocated with vmalloc, the translation to
	 * physical address is not a simple linear transformation like it is
	 * for kernel logical addresses, since vmalloc creates a virtual
	 * mapping. Thus, virt_to_phys() should not be used in this context;
	 * instead the page table must be walked to acquire the physical
	 * address of all pages of the stack.
	 */
	if (likely(is_vmap_stack)) {
		stack_vm_area = task_stack_vm_area(current);
		sp = (u64)stack_vm_area->addr;
	}
	for_each_possible_cpu(cpu) {
		/*
		 * Register dummies for now; once the system is up and
		 * running, each CPU updates its own current entries.
		 */
		md_stack_cpu_d = &per_cpu(md_stack_data, cpu);
		scnprintf(name_str, sizeof(name_str), "KSTACK%d", cpu);
		if (is_vmap_stack)
			register_vmapped_stack(md_stack_cpu_d->stack_mdr,
					       md_stack_cpu_d->stack_mdidx, sp,
					       name_str, false);
		else
			register_normal_stack(md_stack_cpu_d->stack_mdr,
					      md_stack_cpu_d->stack_mdidx, sp,
					      name_str, false);
	}

	register_trace_sched_switch(md_current_stack_notifier, NULL);
	md_current_stack_init = 1;
	smp_call_function(md_current_stack_ipi_handler, NULL, 1);
}

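/*
 * Snapshot the suspending task's stack and task_struct so a hang during
 * suspend/resume still leaves a usable record in the dump. The entries
 * are refreshed from the PM notifier on every PM_SUSPEND_PREPARE.
 */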
static void register_suspend_stack(void)
{
	char name_str[MD_MAX_NAME_LENGTH];
	u64 sp = current_stack_pointer;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	scnprintf(name_str, sizeof(name_str), "KSUSPSTK");
	if (is_vmap_stack) {
		sp = (u64)stack_vm_area->addr;
		register_vmapped_stack(md_suspend_context.stack_mdr,
				       md_suspend_context.stack_mdidx,
				       sp, name_str, false);
	} else {
		register_normal_stack(md_suspend_context.stack_mdr,
				      md_suspend_context.stack_mdidx,
				      sp, name_str, false);
	}
}

static void register_current_task(struct md_region *mdr, int *mdno,
				  char *name_str)
{
	scnprintf(mdr->name, sizeof(mdr->name), "%s", name_str);
	mdr->virt_addr = (u64)current;
	mdr->phys_addr = virt_to_phys((uintptr_t *)current);
	mdr->size = sizeof(struct task_struct);
	*mdno = rk_minidump_add_region(mdr);
	if (*mdno < 0)
		pr_err("Failed to add current task %s in Minidump\n",
		       mdr->name);
}

static void register_suspend_current_task(void)
{
	char name_str[MD_MAX_NAME_LENGTH];

	scnprintf(name_str, sizeof(name_str), "KSUSPTASK");
	register_current_task(&md_suspend_context.task_mdr,
			      &md_suspend_context.task_mdno, name_str);
}

#if !defined(MODULE) && defined(CONFIG_ARM64)
static void register_irq_stacks(void)
{
	struct md_region md_entry;
	int cpu, ret;
	struct page *sp_page;

	for_each_possible_cpu(cpu) {
		scnprintf(md_entry.name, sizeof(md_entry.name), "KIRQSTACK%d", cpu);
		md_entry.virt_addr = (u64)per_cpu(irq_stack_ptr, cpu);

		if (is_vmap_stack) {
			sp_page = vmalloc_to_page((const void *)md_entry.virt_addr);
			md_entry.phys_addr = page_to_phys(sp_page);
		} else {
			md_entry.phys_addr = virt_to_phys((const volatile void *)md_entry.virt_addr);
		}

		md_entry.size = IRQ_STACK_SIZE;
		ret = rk_minidump_add_region(&md_entry);
		if (ret < 0)
			pr_err("Failed to add %s entry in Minidump\n", md_entry.name);
	}
}
#else
static inline void register_irq_stacks(void)
{
}
#endif

static int minidump_pm_notifier(struct notifier_block *nb,
				unsigned long event, void *unused)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		update_md_suspend_currents();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block minidump_pm_nb = {
	.notifier_call = minidump_pm_notifier,
};

static void register_suspend_context(void)
{
	register_suspend_stack();
	register_suspend_current_task();
	register_pm_notifier(&minidump_pm_nb);
	md_suspend_context.init = true;
}
#endif /* CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK */

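/*
 * Each note in the "note" region follows the standard ELF note layout:
 *
 *   struct elf_note { n_namesz, n_descsz, n_type }
 *   name  (n_namesz bytes, padded to an Elf_Word boundary)
 *   desc  (n_descsz bytes; here one struct elf_prstatus per CPU)
 *
 * append_elf_note() writes the header and name, returning a pointer to
 * where the descriptor data should be placed.
 */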
static Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
				 size_t data_len)
{
	struct elf_note *note = (struct elf_note *)buf;

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = data_len;
	note->n_type = type;
	buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf_Word));
	memcpy(buf, name, note->n_namesz);
	buf += DIV_ROUND_UP(note->n_namesz, sizeof(Elf_Word));
	return buf;
}

static void register_note_section(void)
{
	int ret = 0, i = 0;
	size_t data_len;
	Elf_Word *buf;
	void *buffer_start;
	struct elf_prstatus *epr;
	struct md_region *mdr = &note_md_entry;

	buffer_start = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buffer_start)
		return;

	memcpy(mdr->name, "note", 5);
	mdr->virt_addr = (uintptr_t)buffer_start;
	mdr->phys_addr = virt_to_phys(buffer_start);

	buf = (Elf_Word *)mdr->virt_addr;
	data_len = sizeof(struct elf_prstatus);
	for_each_possible_cpu(i) {
		buf = append_elf_note(buf, "CORE", NT_PRSTATUS, data_len);
		epr = (struct elf_prstatus *)buf;
		epr->pr_pid = i;
		per_cpu(cpu_epr, i) = epr;
		buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
	}

	mdr->size = (u64)buf - mdr->virt_addr;
	rk_md_flush_dcache_area((void *)mdr->virt_addr, mdr->size);
	ret = rk_minidump_add_region(mdr);
	if (ret < 0)
		pr_err("Failed to add %s entry in Minidump\n", mdr->name);
}

void rk_minidump_update_cpu_regs(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	struct elf_prstatus *epr = per_cpu(cpu_epr, cpu);

	if (!epr)
		return;

	memcpy((void *)&epr->pr_reg, (void *)regs, sizeof(elf_gregset_t));
	rk_md_flush_dcache_area((void *)&epr->pr_reg, sizeof(elf_gregset_t));
	rk_md_flush_dcache_area((void *)(regs->sp & ~(PAGE_SIZE - 1)), PAGE_SIZE);
}
EXPORT_SYMBOL(rk_minidump_update_cpu_regs);

#ifdef CONFIG_ROCKCHIP_MINIDUMP_FTRACE
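/*
 * Trace events copied at oops time land in a 2 MB circular buffer: when an
 * event would run past the end, the write position wraps back to the start,
 * so the buffer keeps the most recent events rather than failing the copy.
 */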
static void minidump_add_trace_event(char *buf, size_t size)
{
	char *addr;

	if (!READ_ONCE(md_ftrace_buf_addr) ||
	    (size > (size_t)MD_FTRACE_BUF_SIZE))
		return;

	if ((md_ftrace_buf_current + size) > (size_t)MD_FTRACE_BUF_SIZE)
		md_ftrace_buf_current = 0;
	addr = md_ftrace_buf_addr + md_ftrace_buf_current;
	memcpy(addr, buf, size);
	md_ftrace_buf_current += size;
}

static void md_trace_oops_enter(void *unused, bool *enter_check)
{
	if (!minidump_ftrace_in_oops) {
		minidump_ftrace_in_oops = true;
		*enter_check = false;
	} else {
		*enter_check = true;
	}
}

static void md_trace_oops_exit(void *unused, bool *exit_check)
{
	minidump_ftrace_in_oops = false;
}

static void md_update_trace_fmt(void *unused, bool *format_check)
{
	*format_check = false;
}

static void md_buf_size_check(void *unused, unsigned long buffer_size,
			      bool *size_check)
{
	if (!minidump_ftrace_dump) {
		*size_check = true;
		return;
	}

	if (buffer_size > (SZ_256K + PAGE_SIZE)) {
		pr_err("Skip md ftrace buffer dump for: %#lx\n", buffer_size);
		minidump_ftrace_dump = false;
		*size_check = true;
	}
}

static void md_dump_trace_buf(void *unused, struct trace_seq *trace_buf,
			      bool *printk_check)
{
	if (minidump_ftrace_in_oops && minidump_ftrace_dump) {
		minidump_add_trace_event(trace_buf->buffer,
					 trace_buf->seq.len);
		*printk_check = false;
	}
}

static void md_register_trace_buf(void)
{
	struct md_region md_entry;
	void *buffer_start;

	buffer_start = kzalloc(MD_FTRACE_BUF_SIZE, GFP_KERNEL);

	if (!buffer_start)
		return;

	strscpy(md_entry.name, "KFTRACE", sizeof(md_entry.name));
	md_entry.virt_addr = (uintptr_t)buffer_start;
	md_entry.phys_addr = virt_to_phys(buffer_start);
	md_entry.size = MD_FTRACE_BUF_SIZE;
	if (rk_minidump_add_region(&md_entry) < 0)
		pr_err("Failed to add ftrace buffer entry in Minidump\n");

	register_trace_android_vh_ftrace_oops_enter(md_trace_oops_enter,
						    NULL);
	register_trace_android_vh_ftrace_oops_exit(md_trace_oops_exit,
						   NULL);
	register_trace_android_vh_ftrace_size_check(md_buf_size_check,
						    NULL);
	register_trace_android_vh_ftrace_format_check(md_update_trace_fmt,
						      NULL);
	register_trace_android_vh_ftrace_dump_buffer(md_dump_trace_buf,
						     NULL);

	/* Complete registration before adding entries */
	smp_mb();
	WRITE_ONCE(md_ftrace_buf_addr, buffer_start);
}
#endif

#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
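/*
 * The runqueue dump renders each CPU's tasks as an indented tree:
 * md_align_offset tracks the current cgroup nesting depth, and
 * md_dump_align() prints one " | " per level before each entry.
 */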
static void md_dump_align(void)
{
	int tab_offset = md_align_offset;

	while (tab_offset--)
		seq_buf_printf(md_runq_seq_buf, " | ");
	seq_buf_printf(md_runq_seq_buf, " |--");
}

static void md_dump_task_info(struct task_struct *task, char *status,
			      struct task_struct *curr)
{
	struct sched_entity *se;

	md_dump_align();
	if (!task) {
		seq_buf_printf(md_runq_seq_buf, "%s : None(0)\n", status);
		return;
	}

	se = &task->se;
	if (task == curr) {
		seq_buf_printf(md_runq_seq_buf,
			       "[status: curr] pid: %d comm: %s preempt: %#llx\n",
			       task_pid_nr(task), task->comm,
			       (u64)task->thread_info.preempt_count);
		return;
	}

	seq_buf_printf(md_runq_seq_buf,
		       "[status: %s] pid: %d tsk: %#lx comm: %s stack: %#lx",
		       status, task_pid_nr(task),
		       (unsigned long)task,
		       task->comm,
		       (unsigned long)task->stack);
	seq_buf_printf(md_runq_seq_buf,
		       " prio: %d aff: %*pb",
		       task->prio, cpumask_pr_args(&task->cpus_mask));
#ifdef CONFIG_SCHED_WALT
	seq_buf_printf(md_runq_seq_buf, " enq: %lu wake: %lu sleep: %lu",
		       task->wts.last_enqueued_ts, task->wts.last_wake_ts,
		       task->wts.last_sleep_ts);
#endif
	seq_buf_printf(md_runq_seq_buf,
		       " vrun: %lu arr: %lu sum_ex: %lu\n",
		       (unsigned long)se->vruntime,
		       (unsigned long)se->exec_start,
		       (unsigned long)se->sum_exec_runtime);
}

static void md_dump_cfs_rq(struct cfs_rq *cfs, struct task_struct *curr);

static void md_dump_cgroup_state(char *status, struct sched_entity *se_p,
				 struct task_struct *curr)
{
	struct task_struct *task;
	struct cfs_rq *my_q = NULL;
	unsigned int nr_running;

	if (!se_p) {
		md_dump_task_info(NULL, status, NULL);
		return;
	}
#ifdef CONFIG_FAIR_GROUP_SCHED
	my_q = se_p->my_q;
#endif
	if (!my_q) {
		task = container_of(se_p, struct task_struct, se);
		md_dump_task_info(task, status, curr);
		return;
	}
	nr_running = my_q->nr_running;
	md_dump_align();
	seq_buf_printf(md_runq_seq_buf, "%s: %d processes in this group\n",
		       status, nr_running);
	md_align_offset++;
	md_dump_cfs_rq(my_q, curr);
	md_align_offset--;
}

static void md_dump_cfs_node_func(struct rb_node *node,
				  struct task_struct *curr)
{
	struct sched_entity *se_p = container_of(node, struct sched_entity,
						 run_node);

	md_dump_cgroup_state("pend", se_p, curr);
}

static void md_rb_walk_cfs(struct rb_root_cached *rb_root_cached_p,
			   struct task_struct *curr)
{
	int max_walk = 200; /* Bail out, in case of loop */
	struct rb_node *leftmost = rb_root_cached_p->rb_leftmost;
	struct rb_root *root = &rb_root_cached_p->rb_root;
	struct rb_node *rb_node = rb_first(root);

	if (!leftmost)
		return;
	while (rb_node && max_walk--) {
		md_dump_cfs_node_func(rb_node, curr);
		rb_node = rb_next(rb_node);
	}
}

static void md_dump_cfs_rq(struct cfs_rq *cfs, struct task_struct *curr)
{
	struct rb_root_cached *rb_root_cached_p = &cfs->tasks_timeline;

	md_dump_cgroup_state("curr", cfs->curr, curr);
	md_dump_cgroup_state("next", cfs->next, curr);
	md_dump_cgroup_state("last", cfs->last, curr);
	md_dump_cgroup_state("skip", cfs->skip, curr);
	md_rb_walk_cfs(rb_root_cached_p, curr);
}

static void md_dump_rt_rq(struct rt_rq *rt_rq, struct task_struct *curr)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *rt_se;
	int idx;

	/* Lifted most of the below code from dump_throttled_rt_tasks() */
	if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
		return;

	idx = sched_find_first_bit(array->bitmap);
	while (idx < MAX_RT_PRIO) {
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

#ifdef CONFIG_RT_GROUP_SCHED
			if (rt_se->my_q)
				continue;
#endif

			p = container_of(rt_se, struct task_struct, rt);
			md_dump_task_info(p, "pend", curr);
		}
		idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
	}
}

static void md_dump_runqueues(void)
{
	int cpu;
	struct rq *rq;
	struct rt_rq *rt;
	struct cfs_rq *cfs;

	if (!md_runq_seq_buf)
		return;

	for_each_possible_cpu(cpu) {
		rq = cpu_rq(cpu);
		rt = &rq->rt;
		cfs = &rq->cfs;
		seq_buf_printf(md_runq_seq_buf,
			       "CPU%d: %d processes running\n",
			       cpu, rq->nr_running);
		md_dump_task_info(cpu_curr(cpu), "curr", NULL);
		seq_buf_printf(md_runq_seq_buf,
			       "CFS: %d processes pending\n",
			       cfs->nr_running);
		md_dump_cfs_rq(cfs, cpu_curr(cpu));
		seq_buf_printf(md_runq_seq_buf,
			       "RT: %d processes pending\n",
			       rt->rt_nr_running);
		md_dump_rt_rq(rt, cpu_curr(cpu));
		seq_buf_printf(md_runq_seq_buf, "\n");
	}

	rk_md_flush_dcache_area((void *)md_runq_seq_buf->buffer, md_runq_seq_buf->len);
}

#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
/*
 * dump a block of kernel memory from around the given address.
 * Bulk of the code is lifted from arch/arm64/kernel/process.c.
 */
static void md_dump_data(unsigned long addr, int nbytes, const char *name)
{
	int i, j;
	int nlines;
	u32 *p;

	/*
	 * don't attempt to dump non-kernel addresses or
	 * values that are probably just small negative numbers
	 */
	if (addr < PAGE_OFFSET || addr > -256UL)
		return;

	seq_buf_printf(md_cntxt_seq_buf, "\n%s: %#lx:\n", name, addr);

	/*
	 * round address down to a 32 bit boundary
	 * and always dump a multiple of 32 bytes
	 */
	p = (u32 *)(addr & ~(sizeof(u32) - 1));
	nbytes += (addr & (sizeof(u32) - 1));
	nlines = (nbytes + 31) / 32;

	for (i = 0; i < nlines; i++) {
		/*
		 * just display low 16 bits of address to keep
		 * each line of the dump < 80 characters
		 */
		seq_buf_printf(md_cntxt_seq_buf, "%04lx ",
			       (unsigned long)p & 0xffff);
		for (j = 0; j < 8; j++) {
			u32 data = 0;

			if (get_kernel_nofault(data, p))
				seq_buf_printf(md_cntxt_seq_buf, " ********");
			else
				seq_buf_printf(md_cntxt_seq_buf, " %08x", data);
			++p;
		}
		seq_buf_printf(md_cntxt_seq_buf, "\n");
	}
}

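/*
 * Record the register state and, for PC, LR, SP and X0-X29, dump the
 * 256 bytes of kernel memory surrounding each register value, so the
 * context around any pointer-valued register survives in the dump.
 */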
static void md_reg_context_data(struct pt_regs *regs)
{
	mm_segment_t fs;
	unsigned int i;
	int nbytes = 128;

	if (user_mode(regs) || !regs->pc)
		return;

	rk_minidump_update_cpu_regs(regs);
	fs = get_fs();
	set_fs(KERNEL_DS);
	md_dump_data(regs->pc - nbytes, nbytes * 2, "PC");
	md_dump_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
	md_dump_data(regs->sp - nbytes, nbytes * 2, "SP");
	for (i = 0; i < 30; i++) {
		char name[4];

		snprintf(name, sizeof(name), "X%u", i);
		md_dump_data(regs->regs[i] - nbytes, nbytes * 2, name);
	}
	set_fs(fs);
	rk_md_flush_dcache_area((void *)md_cntxt_seq_buf->buffer, md_cntxt_seq_buf->len);
}

static inline void md_dump_panic_regs(void)
{
	struct pt_regs regs;
	u64 tmp1, tmp2;

	/* Lifted from crash_setup_regs() */
	__asm__ __volatile__ (
		"stp x0, x1, [%2, #16 * 0]\n"
		"stp x2, x3, [%2, #16 * 1]\n"
		"stp x4, x5, [%2, #16 * 2]\n"
		"stp x6, x7, [%2, #16 * 3]\n"
		"stp x8, x9, [%2, #16 * 4]\n"
		"stp x10, x11, [%2, #16 * 5]\n"
		"stp x12, x13, [%2, #16 * 6]\n"
		"stp x14, x15, [%2, #16 * 7]\n"
		"stp x16, x17, [%2, #16 * 8]\n"
		"stp x18, x19, [%2, #16 * 9]\n"
		"stp x20, x21, [%2, #16 * 10]\n"
		"stp x22, x23, [%2, #16 * 11]\n"
		"stp x24, x25, [%2, #16 * 12]\n"
		"stp x26, x27, [%2, #16 * 13]\n"
		"stp x28, x29, [%2, #16 * 14]\n"
		"mov %0, sp\n"
		"stp x30, %0, [%2, #16 * 15]\n"

		"/* faked current PSTATE */\n"
		"mrs %0, CurrentEL\n"
		"mrs %1, SPSEL\n"
		"orr %0, %0, %1\n"
		"mrs %1, DAIF\n"
		"orr %0, %0, %1\n"
		"mrs %1, NZCV\n"
		"orr %0, %0, %1\n"
		/* pc */
		"adr %1, 1f\n"
		"1:\n"
		"stp %1, %0, [%2, #16 * 16]\n"
		: "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&regs)
		: "memory"
	);

	seq_buf_printf(md_cntxt_seq_buf, "PANIC CPU : %d\n",
		       raw_smp_processor_id());
	md_reg_context_data(&regs);
}

static int md_die_context_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;

	if (md_in_oops_handler)
		return NOTIFY_DONE;
	md_in_oops_handler = true;
	if (!md_cntxt_seq_buf) {
		md_in_oops_handler = false;
		return NOTIFY_DONE;
	}
	die_cpu = raw_smp_processor_id();
	seq_buf_printf(md_cntxt_seq_buf, "\nDIE CPU : %d\n", die_cpu);
	md_reg_context_data(args->regs);
	md_in_oops_handler = false;
	return NOTIFY_DONE;
}

static struct notifier_block md_die_context_nb = {
	.notifier_call = md_die_context_notify,
	.priority = INT_MAX - 2, /* < rk watchdog die notifier */
};
#endif

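/*
 * Panic notifier: gather everything only worth collecting once the system
 * is already dying (runqueues, meminfo, slab/page owners, dma-buf info)
 * and flush the ELF header. Register context was either saved by the die
 * notifier or is captured here for a plain panic.
 */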
static int md_panic_handler(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	if (md_in_oops_handler)
		return NOTIFY_DONE;
	md_in_oops_handler = true;
#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
	if (!md_cntxt_seq_buf)
		goto dump_rq;
	if (raw_smp_processor_id() != die_cpu)
		md_dump_panic_regs();
dump_rq:
#endif
	md_dump_runqueues();
	if (md_meminfo_seq_buf)
		md_dump_meminfo(md_meminfo_seq_buf);

#ifdef CONFIG_SLUB_DEBUG
	if (md_slabinfo_seq_buf)
		md_dump_slabinfo(md_slabinfo_seq_buf);
#endif

#ifdef CONFIG_PAGE_OWNER
	if (md_pageowner_dump_addr)
		md_dump_pageowner(md_pageowner_dump_addr, md_pageowner_dump_size);
#endif

#ifdef CONFIG_SLUB_DEBUG
	if (md_slabowner_dump_addr)
		md_dump_slabowner(md_slabowner_dump_addr, md_slabowner_dump_size);
#endif
	if (md_dma_buf_info_addr)
		md_dma_buf_info(md_dma_buf_info_addr, md_dma_buf_info_size);

	if (md_dma_buf_procs_addr)
		md_dma_buf_procs(md_dma_buf_procs_addr, md_dma_buf_procs_size);

	rk_minidump_flush_elfheader();
	md_in_oops_handler = false;
	return NOTIFY_DONE;
}

static struct notifier_block md_panic_blk = {
	.notifier_call = md_panic_handler,
	.priority = INT_MAX - 2,
};

static int md_register_minidump_entry(char *name, u64 virt_addr,
				      u64 phys_addr, u64 size)
{
	struct md_region md_entry;
	int ret;

	strscpy(md_entry.name, name, sizeof(md_entry.name));
	md_entry.virt_addr = virt_addr;
	md_entry.phys_addr = phys_addr;
	md_entry.size = size;
	ret = rk_minidump_add_region(&md_entry);
	if (ret < 0)
		pr_err("Failed to add %s entry in Minidump\n", name);
	return ret;
}

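/*
 * Allocate a buffer, register it as a minidump region, then publish the
 * seq_buf pointer last: the smp_mb()/WRITE_ONCE() pairing ensures that by
 * the time a panic path sees a non-NULL *global_buf, the region behind it
 * is fully registered and initialized.
 */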
static int md_register_panic_entries(int num_pages, char *name,
				     struct seq_buf **global_buf)
{
	char *buf;
	struct seq_buf *seq_buf_p;
	int ret;

	buf = kzalloc(num_pages * PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EINVAL;

	seq_buf_p = kzalloc(sizeof(*seq_buf_p), GFP_KERNEL);
	if (!seq_buf_p) {
		ret = -EINVAL;
		goto err_seq_buf;
	}

	ret = md_register_minidump_entry(name, (uintptr_t)buf,
					 virt_to_phys(buf),
					 num_pages * PAGE_SIZE);
	if (ret < 0)
		goto err_entry_reg;

	seq_buf_init(seq_buf_p, buf, num_pages * PAGE_SIZE);

	/* Complete registration before populating data */
	smp_mb();
	WRITE_ONCE(*global_buf, seq_buf_p);
	return 0;

err_entry_reg:
	kfree(seq_buf_p);
err_seq_buf:
	kfree(buf);
	return ret;
}

static void md_register_panic_data(void)
{
	struct dentry *minidump_dir = NULL;

	md_register_panic_entries(MD_RUNQUEUE_PAGES, "KRUNQUEUE",
				  &md_runq_seq_buf);
#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
	md_register_panic_entries(MD_CPU_CNTXT_PAGES, "KCNTXT",
				  &md_cntxt_seq_buf);
#endif
	md_register_panic_entries(MD_MEMINFO_PAGES, "MEMINFO",
				  &md_meminfo_seq_buf);
#ifdef CONFIG_SLUB_DEBUG
	md_register_panic_entries(MD_SLABINFO_PAGES, "SLABINFO",
				  &md_slabinfo_seq_buf);
#endif
	if (!minidump_dir)
		minidump_dir = debugfs_create_dir("minidump", NULL);
#ifdef CONFIG_PAGE_OWNER
	if (is_page_owner_enabled()) {
		md_register_memory_dump(md_pageowner_dump_size, "PAGEOWNER");
		md_debugfs_pageowner(minidump_dir);
	}
#endif
#ifdef CONFIG_SLUB_DEBUG
	if (is_slub_debug_enabled()) {
		md_register_memory_dump(md_slabowner_dump_size, "SLABOWNER");
		md_debugfs_slabowner(minidump_dir);
	}
#endif
	md_register_memory_dump(md_dma_buf_info_size, "DMABUF_INFO");
	md_debugfs_dmabufinfo(minidump_dir);
	md_register_memory_dump(md_dma_buf_procs_size, "DMABUF_PROCS");
	md_debugfs_dmabufprocs(minidump_dir);
}

static int print_module(const char *name, void *mod_addr, void *data)
{
	if (!md_mod_info_seq_buf) {
		pr_err("md_mod_info_seq_buf is NULL\n");
		return -EINVAL;
	}

	seq_buf_printf(md_mod_info_seq_buf, "name: %s, base: %#lx\n", name, (uintptr_t)mod_addr);
	return 0;
}

static int md_module_notify(struct notifier_block *self,
			    unsigned long val, void *data)
{
	struct module *mod = data;

	spin_lock(&md_modules_lock);
	switch (mod->state) {
	case MODULE_STATE_LIVE:
		print_module(mod->name, mod->core_layout.base, data);
		break;
	case MODULE_STATE_GOING:
		print_module(mod->name, mod->core_layout.base, data);
		break;
	default:
		break;
	}
	spin_unlock(&md_modules_lock);
	return 0;
}

static struct notifier_block md_module_nb = {
	.notifier_call = md_module_notify,
};

static void md_register_module_data(void)
{
	int ret;

	ret = md_register_panic_entries(MD_MODULE_PAGES, "KMODULES",
					&md_mod_info_seq_buf);
	if (ret) {
		pr_err("Failed to register minidump module buffer\n");
		return;
	}

	seq_buf_printf(md_mod_info_seq_buf, "=== MODULE INFO ===\n");
	ret = register_module_notifier(&md_module_nb);
	if (ret) {
		pr_err("Failed to register minidump module notifier\n");
		return;
	}

	android_debug_for_each_module(print_module, NULL);
}
#endif /* CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP */

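/*
 * Entry point: registers every static region (ELF notes, kernel sections,
 * stacks) and hooks the trace/panic/die notifiers that populate the
 * dynamic regions at crash time.
 */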
int rk_minidump_log_init(void)
{
	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);

	register_note_section();
#ifdef CONFIG_ANDROID_DEBUG_SYMBOLS
	register_kernel_sections();
#endif

#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
	register_current_stack();
	register_suspend_context();
	register_irq_stacks();
#endif

#ifdef CONFIG_ROCKCHIP_MINIDUMP_FTRACE
	md_register_trace_buf();
#endif

#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
	md_register_module_data();
	md_register_panic_data();
	atomic_notifier_chain_register(&panic_notifier_list, &md_panic_blk);
#ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
	register_die_notifier(&md_die_context_nb);
#endif
#endif
	return 0;
}