// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
7*4882a593Smuzhiyun #include <linux/kernel.h>
8*4882a593Smuzhiyun #include <linux/sched.h>
9*4882a593Smuzhiyun #include <linux/perf_event.h>
10*4882a593Smuzhiyun #include <linux/percpu.h>
11*4882a593Smuzhiyun #include <linux/uaccess.h>
12*4882a593Smuzhiyun #include <linux/mm.h>
13*4882a593Smuzhiyun #include <asm/ptrace.h>
14*4882a593Smuzhiyun #include <asm/sigcontext.h>
15*4882a593Smuzhiyun #include <asm/ucontext.h>
16*4882a593Smuzhiyun #include <asm/vdso.h>
17*4882a593Smuzhiyun #include <asm/pte-walk.h>
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include "callchain.h"
20*4882a593Smuzhiyun
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
/*
 * Slow path for reading a word from the user stack: pin the backing
 * page with the fast-only gup helper and copy from its kernel mapping,
 * so that we never invoke hash_page from interrupt context.
 * Returns 0 on success, -EFAULT if the page is not present.
 */
int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
{
	unsigned long uaddr = (unsigned long)ptr;
	struct page *pg;
	void *base;

	/*
	 * NOTE(review): FOLL_WRITE looks stronger than needed for a
	 * read-only copy -- confirm this is intentional.
	 */
	if (!get_user_page_fast_only(uaddr, FOLL_WRITE, &pg))
		return -EFAULT;

	base = page_address(pg);
	/* Copy from uaddr's offset within its page. */
	memcpy(buf, base + (uaddr & ~PAGE_MASK), nb);
	put_page(pg);

	return 0;
}
46*4882a593Smuzhiyun
/* Fetch one 64-bit word from the user stack at ptr into *ret. */
static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
	int rc = __read_user_stack(ptr, ret, sizeof(*ret));

	return rc;
}
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun /*
53*4882a593Smuzhiyun * 64-bit user processes use the same stack frame for RT and non-RT signals.
54*4882a593Smuzhiyun */
/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 * This layout mirrors the user-visible signal frame laid down by signal
 * delivery; do not reorder or resize fields.
 */
struct signal_frame_64 {
	char dummy[__SIGNAL_FRAMESIZE];	/* caller's stack frame space */
	struct ucontext uc;		/* saved user context (gp_regs live here) */
	unsigned long unused[2];
	unsigned int tramp[6];		/* sigreturn trampoline; matched by is_sigreturn_64_address() */
	struct siginfo *pinfo;		/* expected to point at 'info' below (checked by sane_signal_64_frame()) */
	void *puc;			/* expected to point at 'uc' above (checked by sane_signal_64_frame()) */
	struct siginfo info;
	char abigap[288];		/* presumably the ABI-mandated gap below the frame -- TODO confirm */
};
65*4882a593Smuzhiyun
is_sigreturn_64_address(unsigned long nip,unsigned long fp)66*4882a593Smuzhiyun static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun if (nip == fp + offsetof(struct signal_frame_64, tramp))
69*4882a593Smuzhiyun return 1;
70*4882a593Smuzhiyun if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
71*4882a593Smuzhiyun nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
72*4882a593Smuzhiyun return 1;
73*4882a593Smuzhiyun return 0;
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun /*
77*4882a593Smuzhiyun * Do some sanity checking on the signal frame pointed to by sp.
78*4882a593Smuzhiyun * We check the pinfo and puc pointers in the frame.
79*4882a593Smuzhiyun */
sane_signal_64_frame(unsigned long sp)80*4882a593Smuzhiyun static int sane_signal_64_frame(unsigned long sp)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun struct signal_frame_64 __user *sf;
83*4882a593Smuzhiyun unsigned long pinfo, puc;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun sf = (struct signal_frame_64 __user *) sp;
86*4882a593Smuzhiyun if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
87*4882a593Smuzhiyun read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
88*4882a593Smuzhiyun return 0;
89*4882a593Smuzhiyun return pinfo == (unsigned long) &sf->info &&
90*4882a593Smuzhiyun puc == (unsigned long) &sf->uc;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
/*
 * Record a user-mode callchain for a 64-bit process into @entry by
 * walking the stack back-chain starting from the register state in
 * @regs. Signal frames found along the way are decoded so the walk
 * continues in the interrupted context.
 */
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;		/* frames walked since last (re)start */
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];	/* r1 is the stack pointer */
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		/* fp[0] is the back-chain word; stop on bad sp or failed read. */
		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
			return;
		/* fp[2]: saved LR slot in the frame -- read only past the first frame. */
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame: pull the
			 * interrupted NIP/LR/SP out of the saved user
			 * context and restart the walk from there.
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;	/* first frame: return address is still in LR */
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
147