1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Stack trace management functions
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun #include <linux/sched.h>
7*4882a593Smuzhiyun #include <linux/sched/debug.h>
8*4882a593Smuzhiyun #include <linux/sched/task_stack.h>
9*4882a593Smuzhiyun #include <linux/stacktrace.h>
10*4882a593Smuzhiyun #include <linux/export.h>
11*4882a593Smuzhiyun #include <linux/uaccess.h>
12*4882a593Smuzhiyun #include <asm/stacktrace.h>
13*4882a593Smuzhiyun #include <asm/unwind.h>
14*4882a593Smuzhiyun
/*
 * Walk the kernel stack of @task (or the stack described by @regs) and
 * feed each return address to @consume_entry until it returns false or
 * the unwinder runs out of frames.
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long ret_addr;

	/* If regs were supplied, report the interrupted instruction first. */
	if (regs && !consume_entry(cookie, regs->ip))
		return;

	unwind_start(&state, task, regs, NULL);
	while (!unwind_done(&state)) {
		ret_addr = unwind_get_return_address(&state);
		/* Stop on an unresolvable address or when the consumer quits. */
		if (!ret_addr || !consume_entry(cookie, ret_addr))
			return;
		unwind_next_frame(&state);
	}
}
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun * This function returns an error if it detects any unreliable features of the
34*4882a593Smuzhiyun * stack. Otherwise it guarantees that the stack trace is reliable.
35*4882a593Smuzhiyun *
36*4882a593Smuzhiyun * If the task is not 'current', the caller *must* ensure the task is inactive.
37*4882a593Smuzhiyun */
arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,void * cookie,struct task_struct * task)38*4882a593Smuzhiyun int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
39*4882a593Smuzhiyun void *cookie, struct task_struct *task)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun struct unwind_state state;
42*4882a593Smuzhiyun struct pt_regs *regs;
43*4882a593Smuzhiyun unsigned long addr;
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun for (unwind_start(&state, task, NULL, NULL);
46*4882a593Smuzhiyun !unwind_done(&state) && !unwind_error(&state);
47*4882a593Smuzhiyun unwind_next_frame(&state)) {
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun regs = unwind_get_entry_regs(&state, NULL);
50*4882a593Smuzhiyun if (regs) {
51*4882a593Smuzhiyun /* Success path for user tasks */
52*4882a593Smuzhiyun if (user_mode(regs))
53*4882a593Smuzhiyun return 0;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /*
56*4882a593Smuzhiyun * Kernel mode registers on the stack indicate an
57*4882a593Smuzhiyun * in-kernel interrupt or exception (e.g., preemption
58*4882a593Smuzhiyun * or a page fault), which can make frame pointers
59*4882a593Smuzhiyun * unreliable.
60*4882a593Smuzhiyun */
61*4882a593Smuzhiyun if (IS_ENABLED(CONFIG_FRAME_POINTER))
62*4882a593Smuzhiyun return -EINVAL;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun addr = unwind_get_return_address(&state);
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun /*
68*4882a593Smuzhiyun * A NULL or invalid return address probably means there's some
69*4882a593Smuzhiyun * generated code which __kernel_text_address() doesn't know
70*4882a593Smuzhiyun * about.
71*4882a593Smuzhiyun */
72*4882a593Smuzhiyun if (!addr)
73*4882a593Smuzhiyun return -EINVAL;
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun if (!consume_entry(cookie, addr))
76*4882a593Smuzhiyun return -EINVAL;
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /* Check for stack corruption */
80*4882a593Smuzhiyun if (unwind_error(&state))
81*4882a593Smuzhiyun return -EINVAL;
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun return 0;
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
87*4882a593Smuzhiyun
/*
 * One userspace stack frame as read by the frame-pointer walker below:
 * the saved frame pointer of the caller followed by the return address.
 * NOTE(review): assumes the usual compiler frame layout with frame
 * pointers enabled — frames without it terminate the walk.
 */
struct stack_frame_user {
	const void __user *next_fp;	/* caller's saved frame pointer (user VA) */
	unsigned long ret_addr;		/* return address into the caller */
};
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun static int
copy_stack_frame(const struct stack_frame_user __user * fp,struct stack_frame_user * frame)94*4882a593Smuzhiyun copy_stack_frame(const struct stack_frame_user __user *fp,
95*4882a593Smuzhiyun struct stack_frame_user *frame)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun int ret;
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
100*4882a593Smuzhiyun return 0;
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun ret = 1;
103*4882a593Smuzhiyun pagefault_disable();
104*4882a593Smuzhiyun if (__get_user(frame->next_fp, &fp->next_fp) ||
105*4882a593Smuzhiyun __get_user(frame->ret_addr, &fp->ret_addr))
106*4882a593Smuzhiyun ret = 0;
107*4882a593Smuzhiyun pagefault_enable();
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun return ret;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun
/*
 * Walk a userspace stack by chasing the frame-pointer chain starting at
 * regs->bp, reporting regs->ip first and then each saved return address
 * to @consume_entry.  The walk stops on a faulting/invalid frame, a
 * frame pointer below the stack pointer, a zero return address, or when
 * the consumer returns false.
 */
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	/* The sampled instruction pointer is always the first entry. */
	if (!consume_entry(cookie, regs->ip))
		return;

	for (;;) {
		struct stack_frame_user frame = {
			.next_fp  = NULL,
			.ret_addr = 0,
		};

		if (!copy_stack_frame(fp, &frame))
			break;
		/* Frame pointers must point at or above the stack pointer. */
		if ((unsigned long)fp < regs->sp)
			break;
		if (!frame.ret_addr)
			break;
		if (!consume_entry(cookie, frame.ret_addr))
			break;
		fp = frame.next_fp;
	}
}
136*4882a593Smuzhiyun
137