// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright (C) Huawei Inc., 2014
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
/* for arm_gen_branch */
#include <asm/insn.h>
/* for patch_text */
#include <asm/patch.h>

#include "core.h"

/*
 * See register_usage_flags. If the probed instruction doesn't use PC,
 * we can copy it into the template and have it executed directly,
 * without simulation or emulation.
 */
#define ARM_REG_PC	15
#define can_kprobe_direct_exec(m)	(!test_bit(ARM_REG_PC, &(m)))

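/*
 * Illustrative example (flag values are hypothetical): an instruction
 * that only touches r0 and r1 would have register_usage_flags == 0x3,
 * leaving bit 15 (PC) clear, so can_kprobe_direct_exec() is true; an
 * instruction like 'ldr r0, [pc, #8]' sets bit 15 and must be
 * simulated or emulated instead.
 */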
/*
 * NOTE: the first 'sub' and 'add' instructions below are rewritten by
 * arch_prepare_optimized_kprobe() to match the stack cost of the
 * probed instruction.
 */
asm (
			".global optprobe_template_entry\n"
			"optprobe_template_entry:\n"
			".global optprobe_template_sub_sp\n"
			"optprobe_template_sub_sp:"
			"	sub	sp, sp, #0xff\n"
			"	stmia	sp, {r0 - r14} \n"
			".global optprobe_template_add_sp\n"
			"optprobe_template_add_sp:"
			/* r3 = the original sp, i.e. sp before the 'sub' above */
			"	add	r3, sp, #0xff\n"
			/* store the original sp into the saved r13 slot (13 * 4 = 52) */
			"	str	r3, [sp, #52]\n"
			"	mrs	r4, cpsr\n"
			/* store cpsr into its pt_regs slot (16 * 4 = 64) */
			"	str	r4, [sp, #64]\n"
			/* r1 = &pt_regs, r0 = &op, r2 = optimized_callback */
			"	mov	r1, sp\n"
			"	ldr	r0, 1f\n"
			"	ldr	r2, 2f\n"
			/*
			 * AEABI requires an 8-byte aligned stack. If
			 * SP % 8 != 0 (SP % 4 == 0 is already ensured),
			 * allocate extra bytes here to realign it.
			 */
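			/*
			 * Worked example (address is illustrative): if
			 * sp == 0x...c4 here, sp & 4 == 4, so the 'sub'
			 * below drops sp to 0x...c0, which is 8-byte
			 * aligned for the call; the matching 'add' after
			 * the call restores it.
			 */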
			"	and	r4, sp, #4\n"
			"	sub	sp, sp, r4\n"
#if __LINUX_ARM_ARCH__ >= 5
			"	blx	r2\n"
#else
			"	mov	lr, pc\n"
			"	mov	pc, r2\n"
#endif
			"	add	sp, sp, r4\n"
			/* reload the (possibly updated) cpsr from pt_regs */
			"	ldr	r1, [sp, #64]\n"
			"	tst	r1, #"__stringify(PSR_T_BIT)"\n"
			/* pc lives in the saved r15 slot (15 * 4 = 60) */
			"	ldrne	r2, [sp, #60]\n"
			"	orrne	r2, #1\n"
			"	strne	r2, [sp, #60] @ set bit0 of PC for thumb\n"
			"	msr	cpsr_cxsf, r1\n"
			".global optprobe_template_restore_begin\n"
			"optprobe_template_restore_begin:\n"
			"	ldmia	sp, {r0 - r15}\n"
			".global optprobe_template_restore_orig_insn\n"
			"optprobe_template_restore_orig_insn:\n"
			"	nop\n"
			".global optprobe_template_restore_end\n"
			"optprobe_template_restore_end:\n"
			"	nop\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			/* filled with &op by arch_prepare_optimized_kprobe() */
			"1:	.long 0\n"
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			/* filled with &optimized_callback */
			"2:	.long 0\n"
			".global optprobe_template_end\n"
			"optprobe_template_end:\n");

#define TMPL_VAL_IDX \
	((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
#define TMPL_END_IDX \
	((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
#define TMPL_ADD_SP \
	((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_SUB_SP \
	((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_BEGIN \
	((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_ORIG_INSN \
	((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_END \
	((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
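/*
 * These are word offsets into the template above. For example,
 * TMPL_VAL_IDX is the index of the '1: .long 0' slot, so after
 * preparation code[TMPL_VAL_IDX] holds the optimized_kprobe pointer
 * that the template's 'ldr r0, 1f' loads at run time.
 */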

/*
 * With the ARM ISA, an instruction can almost always be optimized;
 * the exceptions are instructions like 'str r0, [sp, r1]' that store
 * to the stack and whose stack space consumption cannot be determined
 * statically.
 */
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * In the ARM ISA, a kprobe always replaces exactly one instruction
 * (4-byte aligned and 4 bytes long), so no other kprobe can fall
 * within the replaced range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

/* Caller must ensure addr & 3 == 0 */
static int can_optimize(struct kprobe *kp)
{
	if (kp->ainsn.stack_space < 0)
		return 0;
	/*
	 * 255 is the largest immediate that fits the template's
	 * 'sub sp, sp, #<imm>' and 'add r3, sp, #<imm>' instructions;
	 * larger values would need the rotated-immediate encoding.
	 */
	if (kp->ainsn.stack_space > 255 - sizeof(struct pt_regs))
		return 0;
	return 1;
}
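/*
 * Example for can_optimize() (assuming the usual 32-bit ARM struct
 * pt_regs of 18 words, i.e. 72 bytes): the probed instruction may use
 * at most 255 - 72 = 183 bytes of stack and still be optimizable.
 */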

/* Free optimized instruction slot */
static void
__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
	}
}

extern void kprobe_handler(struct pt_regs *regs);

static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	unsigned long flags;
	struct kprobe *p = &op->kp;
	struct kprobe_ctlblk *kcb;

	/* Fill in the registers the template skipped */
	regs->ARM_pc = (unsigned long)op->kp.addr;
	regs->ARM_ORIG_r0 = ~0UL;

	local_irq_save(flags);
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	/*
	 * We single-step the replaced instruction only when it can't be
	 * executed directly during restore.
	 */
	if (!p->ainsn.kprobe_direct_exec)
		op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);

	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback)

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
{
	kprobe_opcode_t *code;
	unsigned long rel_chk;
	unsigned long val;
	unsigned long stack_protect = sizeof(struct pt_regs);

	if (!can_optimize(orig))
		return -EILSEQ;

	code = get_optinsn_slot();
	if (!code)
		return -ENOMEM;

	/*
	 * Verify that the address gap is within the ±32MiB range of a
	 * relative branch, because kprobe opt uses a 'b' instruction to
	 * branch to optinsn.insn.
	 *
	 * According to the ARM manual, the branch instruction is:
	 *
	 *   31  28 27        24 23                      0
	 *  +------+---+---+---+---+------------------------+
	 *  | cond | 1 | 0 | 1 | 0 |          imm24         |
	 *  +------+---+---+---+---+------------------------+
	 *
	 * imm24 is a signed 24-bit integer. The real branch offset is
	 * computed by: imm32 = SignExtend(imm24:'00', 32);
	 *
	 * So the maximum forward branch is:
	 *   (0x007fffff << 2) = 0x01fffffc
	 * and the maximum backward branch is:
	 *   (0xff800000 << 2) = 0xfe000000 = -0x02000000
	 *
	 * We can simply check (rel & 0xfe000003):
	 *   if rel is positive, (rel & 0xfe000000) should be 0;
	 *   if rel is negative, (rel & 0xfe000000) should be 0xfe000000;
	 * the final '3' checks alignment.
	 */
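	/*
	 * Worked example: rel = +0x01fffffc (the forward limit) gives
	 * rel & 0xfe000003 == 0 and passes; rel = +0x02000000 gives
	 * 0x02000000 and fails; rel = -0x02000000 (0xfe000000) gives
	 * 0xfe000000 and still passes as the backward limit.
	 */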
	rel_chk = (unsigned long)((long)code -
			(long)orig->addr + 8) & 0xfe000003;

	if ((rel_chk != 0) && (rel_chk != 0xfe000000)) {
		/*
		 * Unlike x86, we free the code buffer directly instead
		 * of calling __arch_remove_optimized_kprobe(), because
		 * we have not filled in any field of op yet.
		 */
		free_optinsn_slot(code, 0);
		return -ERANGE;
	}

	/* Copy the arch-specific instance from the template. */
	memcpy(code, (unsigned long *)optprobe_template_entry,
			TMPL_END_IDX * sizeof(kprobe_opcode_t));

	/* Adjust the buffer according to the probed instruction. */
	BUG_ON(orig->ainsn.stack_space < 0);

	stack_protect += orig->ainsn.stack_space;

	/* Should have been filtered by can_optimize(). */
	BUG_ON(stack_protect > 255);

	/* Create a 'sub sp, sp, #<stack_protect>' */
	code[TMPL_SUB_SP] = __opcode_to_mem_arm(0xe24dd000 | stack_protect);
	/* Create an 'add r3, sp, #<stack_protect>' */
	code[TMPL_ADD_SP] = __opcode_to_mem_arm(0xe28d3000 | stack_protect);

	/* Set probe information */
	val = (unsigned long)op;
	code[TMPL_VAL_IDX] = val;

	/* Set probe function call */
	val = (unsigned long)optimized_callback;
	code[TMPL_CALL_IDX] = val;

	/* If possible, copy the insn and have it executed during restore */
	orig->ainsn.kprobe_direct_exec = false;
	if (can_kprobe_direct_exec(orig->ainsn.register_usage_flags)) {
		kprobe_opcode_t final_branch = arm_gen_branch(
				(unsigned long)(&code[TMPL_RESTORE_END]),
				(unsigned long)(op->kp.addr) + 4);
		if (final_branch != 0) {
			/*
			 * Replace the original 'ldmia sp, {r0 - r15}' with
			 * 'ldmia sp, {r0 - r14}': restore all registers
			 * except pc.
			 */
			code[TMPL_RESTORE_BEGIN] = __opcode_to_mem_arm(0xe89d7fff);

			/* The original probed instruction */
			code[TMPL_RESTORE_ORIG_INSN] = __opcode_to_mem_arm(orig->opcode);

			/* Jump back to the next instruction */
			code[TMPL_RESTORE_END] = __opcode_to_mem_arm(final_branch);
			orig->ainsn.kprobe_direct_exec = true;
		}
	}
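	/*
	 * Illustrative layout of the direct-exec tail for a probe at
	 * address A: restore_begin reloads r0-r14 from pt_regs,
	 * restore_orig_insn executes the copied original instruction,
	 * and restore_end branches back to A + 4, so optimized_callback()
	 * needs no single-stepping or simulation.
	 */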

	flush_icache_range((unsigned long)code,
			   (unsigned long)(&code[TMPL_END_IDX]));

	/* Setting op->optinsn.insn marks the probe as prepared. */
	op->optinsn.insn = code;
	return 0;
}

void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		unsigned long insn;
		WARN_ON(kprobe_disabled(&op->kp));

		/*
		 * Back up the instruction that will be replaced
		 * by the jump.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
				RELATIVEJUMP_SIZE);

		insn = arm_gen_branch((unsigned long)op->kp.addr,
				(unsigned long)op->optinsn.insn);
		BUG_ON(insn == 0);

		/*
		 * Make it a conditional branch if the replaced insn
		 * is conditional.
		 */
		insn = (__mem_to_opcode_arm(
			op->optinsn.copied_insn[0]) & 0xf0000000) |
			(insn & 0x0fffffff);
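		/*
		 * Illustrative example: if the replaced instruction is
		 * 'addne r0, r0, #1' (0x12800001), its condition field
		 * is 0x1 (NE), so the unconditional 'b' (0xea......)
		 * becomes 'bne' (0x1a......) and the optimized path is
		 * only entered when the original instruction would have
		 * executed.
		 */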

		/*
		 * Similar to __arch_disarm_kprobe, operations which
		 * remove breakpoints must be wrapped in stop_machine
		 * to avoid racing.
		 */
		kprobes_remove_breakpoint(op->kp.addr, insn);

		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}
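/*
 * Since an optimized kprobe replaces a single 4-byte instruction
 * (RELATIVEJUMP_SIZE), arch_within_optimized_kprobe() covers exactly
 * that one word: for a probe at 0xc0001000 (address is illustrative),
 * only 0xc0001000 through 0xc0001003 are reported as within it.
 */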

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}