// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

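/*
 * Probes are refused on the kprobes infrastructure text itself, and on the
 * head/exception-entry text between _stext and __head_end, which can run
 * with translation off where a probe trap cannot be handled safely.
 */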
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)_stext &&
		 addr < (unsigned long)__head_end);
}

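/*
 * Resolve a symbol name, optionally in "module:symbol" form, to a probeable
 * text address for the ELF ABI in use. As an illustrative example
 * (hypothetical symbol name): on ABIv1, "ext4:ext4_file_open" is first
 * tried as "ext4:.ext4_file_open", so the lookup lands on the function's
 * text rather than on its function descriptor.
 */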
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr = NULL;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;
		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	bool dot_appended = false;
	const char *c;
	ssize_t ret = 0;
	int len = 0;

	if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
		c++;
		len = c - name;
		memcpy(dot_name, name, len);
	} else
		c = name;

	if (*c != '\0' && *c != '.') {
		dot_name[len++] = '.';
		dot_appended = true;
	}
	ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
	if (ret > 0)
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);

	/* Fallback to the original non-dot symbol lookup */
	if (!addr && dot_appended)
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}

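/*
 * Validate and set up a probe. The address must be word-aligned, must not
 * hold an mtmsr[d]/rfi[d] (these cannot be single-stepped out of line
 * safely), and must not be the second word of a prefixed instruction;
 * neither at the probe site itself nor via a previously registered probe
 * on the preceding word. The probed instruction is then copied into an
 * instruction slot for out-of-line single-stepping.
 */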
int arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *prev;
	struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	} else if ((unsigned long)p->addr & ~PAGE_MASK &&
		   ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
		ret = -EINVAL;
	}
	preempt_disable();
	prev = get_kprobe(p->addr - 1);
	preempt_enable_no_resched();
	if (prev &&
	    ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
		ret = -EINVAL;
	}

	/*
	 * insn must be on a special executable page on ppc64. This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt.
	 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
		p->opcode = ppc_inst_val(insn);
	}

	p->ainsn.boostable = 0;
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

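/*
 * Arming patches a trap instruction over the probed location; disarming
 * writes the saved original opcode back. Both rely on patch_instruction()
 * to keep the instruction cache coherent with the modified text.
 */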
void arch_arm_kprobe(struct kprobe *p)
{
	patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

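/*
 * Set up out-of-line single-stepping: single-step mode is enabled in the
 * MSR (MSR[SE], or the BookE debug equivalent) and nip is pointed at the
 * copied instruction in the insn slot, so the trap stays in place in the
 * original text for other CPUs.
 */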
static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as the register values could determine
	 * whether the trap is taken.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					       struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

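/*
 * On ABIv2, the global entry point is followed by TOC setup before the
 * local entry point at offset 8, and with -mprofile-kernel the ftrace site
 * sits within the first 16 bytes (see the livepatch.h note above); offsets
 * up to those bounds still count as "function entry". Elsewhere, only
 * offset 0 qualifies.
 */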
bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}

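/*
 * Hijack the link register at function entry: the real return address is
 * saved in the kretprobe instance and LR is redirected to the trampoline,
 * so the probed function "returns" into kretprobe_trampoline below.
 */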
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

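/*
 * Try to emulate the probed instruction in software rather than
 * single-stepping it. Returns > 0 if emulation succeeded (nip is already
 * advanced), 0 if the instruction must be single-stepped, and < 0 for an
 * instruction we refuse to step. ainsn.boostable records the outcome:
 * 1 once emulation has succeeded, -1 if it can never be emulated.
 */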
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %s\n", ppc_inst_as_str(insn));
		BUG();
	} else {
		/*
		 * If we haven't previously emulated this instruction, then it
		 * can't be boosted. Note it down so we don't try to do so again.
		 *
		 * If, however, we had emulated this instruction in the past,
		 * then this is just an error with the current run (for
		 * instance, exceptions due to a load/store). We return 0 so
		 * that this is now single-stepped, but continue to try
		 * emulating it in subsequent probe hits.
		 */
		if (unlikely(p->ainsn.boostable != 1))
			p->ainsn.boostable = -1;
	}

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);

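/*
 * Main trap-time entry point, called from the program check handler when a
 * trap instruction is hit in the kernel. Returns 1 if the trap was consumed
 * by kprobes, 0 to let the kernel's normal trap handling proceed.
 */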
int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	if (!IS_ENABLED(CONFIG_BOOKE) &&
	    (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	p = get_kprobe(addr);
	if (!p) {
		unsigned int instr;

		if (get_kernel_nofault(instr, addr))
			goto no_kprobe;

		if (instr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else.
			 */
			if (is_trap(instr))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		kprobe_opcode_t insn = *p->ainsn.insn;
		if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
			/* Turn off 'trace' bits */
			regs->msr &= ~MSR_SINGLESTEP;
			regs->msr |= kcb->kprobe_saved_msr;
			goto no_kprobe;
		}

		/*
		 * We have reentered the kprobe_handler(), since another probe
		 * was hit while within the handler. We here save the original
		 * kprobes variables and just single step on the instruction of
		 * the new probe without calling any user handlers.
		 */
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kprobes_inc_nmissed_count(p);
		kcb->kprobe_status = KPROBE_REENTER;
		if (p->ainsn.boostable >= 0) {
			ret = try_to_emulate(p, regs);

			if (ret > 0) {
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
		}
		prepare_singlestep(p, regs);
		return 1;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler changed execution path, so skip ss setup */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");
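/*
 * Note: the nop above is the probed instruction. Arming trampoline_p
 * (registered in arch_init_kprobes() below) patches it into a trap, which
 * is how returning functions divert into trampoline_probe_handler().
 */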

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long orig_ret_address;

	orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
	/*
	 * We get here through one of two paths:
	 * 1. by taking a trap -> kprobe_handler() -> here
	 * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
	 *
	 * When going back through (1), we need regs->nip to be setup properly
	 * as it is used to determine the return address from the trap.
	 * For (2), since nip is not honoured with optprobes, we instead setup
	 * the link register properly so that the subsequent 'blr' in
	 * kretprobe_trampoline jumps back to the right instruction.
	 *
	 * For nip, we should set the address to the previous instruction since
	 * we end up emulating it in kprobe_handler(), which increments the nip
	 * again.
	 */
	regs->nip = orig_ret_address - 4;
	regs->link = orig_ret_address;

	return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
	int len;
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to after the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + len;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

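/*
 * Invoked from the page fault path (via kprobe_page_fault()) when a fault
 * is taken while a kprobe is active. Returns 1 if the fault was handled
 * here (fixed up or consumed by the user fault handler), 0 to let normal
 * fault handling continue.
 */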
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the nip points back to the probe
		 * address, and allow the page fault handler to
		 * continue as a normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tried to access user space via
		 * copy_from_user(), get_user(), etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

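/*
 * On ABIv1 an entry point may be a function descriptor rather than a text
 * address: if it does not point into kernel text, dereference it to get
 * the global entry point. Elsewhere the address is used as-is.
 */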
unsigned long arch_deref_entry_point(void *entry)
{
#ifdef PPC64_ELF_ABI_v1
	if (!kernel_text_address((unsigned long)entry))
		return ppc_global_function_entry(entry);
	else
#endif
		return (unsigned long)entry;
}
NOKPROBE_SYMBOL(arch_deref_entry_point);

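/*
 * The kprobe that backs the kretprobe trampoline, registered at init time
 * so that a return through kretprobe_trampoline traps straight into
 * trampoline_probe_handler(). arch_trampoline_kprobe() below lets generic
 * kprobes code recognize this probe as the arch trampoline probe.
 */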
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);