/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely.  */
#include <linux/sched.h>		/* For struct task_struct.  */
#include <linux/sched/signal.h>		/* For send_sig(), same_thread_group(), etc. */
#include <linux/err.h>			/* for IS_ERR_VALUE */
#include <linux/bug.h>			/* For BUG_ON.  */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns.  */
#include <uapi/linux/ptrace.h>
#include <linux/seccomp.h>

/* Add sp to seccomp_data; seccomp is a user-facing API, so we don't want to modify it. */
struct syscall_info {
	__u64			sp;
	struct seccomp_data	data;
};

extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
			    void *buf, int len, unsigned int gup_flags);

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  When a task is running, it owns its task->ptrace flags.
 * When a task is stopped, the ptracer owns task->ptrace.
 */

#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001

#define PT_OPT_FLAG_SHIFT	3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)	(1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD		PT_EVENT_FLAG(0)
#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
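
/*
 * Illustration (not part of the ABI): each PTRACE_O_* option bit from the
 * uapi header maps onto a PT_* flag shifted up by PT_OPT_FLAG_SHIFT.  For
 * example, PTRACE_EVENT_FORK is 1 and PTRACE_O_TRACEFORK is (1 << 1), so
 *
 *	PT_TRACE_FORK == 1 << (PT_OPT_FLAG_SHIFT + 1) == 0x10
 *	             == PTRACE_O_TRACEFORK << PT_OPT_FLAG_SHIFT
 *
 * i.e. the option mask a tracer passes to PTRACE_SETOPTIONS lands in
 * task->ptrace once shifted by PT_OPT_FLAG_SHIFT.
 */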

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent,
			  const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04
#define PTRACE_MODE_FSCREDS	0x08
#define PTRACE_MODE_REALCREDS	0x10

/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)

/**
 * ptrace_may_access - check whether the caller is permitted to access
 * a target task.
 * @task: target task
 * @mode: selects type of access and caller credentials
 *
 * Returns true on success, false on denial.
 *
 * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
 * be set in @mode to specify whether the access was requested through
 * a filesystem syscall (should use effective capabilities and fsuid
 * of the caller) or through an explicit syscall such as
 * process_vm_writev or ptrace (and should use the real credentials).
 */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
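
/*
 * Example (sketch only): a procfs-style reader would typically check the
 * target with filesystem credentials before touching its state, e.g.:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *		return -EACCES;
 *
 * whereas an explicit tracing or process_vm_*-style syscall would pass
 * PTRACE_MODE_ATTACH_REALCREDS or PTRACE_MODE_READ_REALCREDS instead.
 */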

static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * live only by RCU.  During exec, this may be called with task_lock() held
 * on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC) {
		/* legacy EXEC report via SIGTRAP */
		if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
	}
}
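
/*
 * Illustration (no new API): a tracee reporting, say, PTRACE_EVENT_EXIT
 * stops with exit_code (PTRACE_EVENT_EXIT << 8) | SIGTRAP, so the tracer's
 * waitpid() status satisfies
 *
 *	(status >> 8) == (SIGTRAP | (PTRACE_EVENT_EXIT << 8))
 *
 * which is how a debugger usually distinguishes event stops from plain
 * SIGTRAP stops.
 */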

/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @pid: process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
	/*
	 * FIXME: There's a potential race if a ptracer in a different pid
	 * namespace than parent attaches between computing message below and
	 * when we acquire tasklist_lock in ptrace_stop().  If this happens,
	 * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
	 */
	unsigned long message = 0;
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(rcu_dereference(current->parent));
	if (ns)
		message = pid_nr_ns(pid, ns);
	rcu_read_unlock();

	ptrace_event(event, message);
}
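
/*
 * Typical caller (sketch; see the fork/clone paths): after a new child has
 * been set running, the parent reports it with something like
 *
 *	ptrace_event_pid(PTRACE_EVENT_FORK, pid);
 *
 * so that the tracer's PTRACE_GETEVENTMSG sees the child's pid translated
 * into the tracer's pid namespace.
 */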

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent, current->ptracer_cred);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);
	} else
		child->ptracer_cred = NULL;
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
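
/*
 * Example (sketch): a handler whose legitimate result can look like a
 * negative errno (a large offset, a peeked word, and so on) would end with
 *
 *	force_successful_syscall_return();
 *	return val;
 *
 * so that, on architectures with a separate error flag, the flag is not set
 * and the value is returned to userspace as a success.
 */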

#ifndef is_syscall_success
/*
 * On most systems we can tell whether a syscall succeeded based on whether
 * the return value is an error value.  Some systems, like ia64 and powerpc,
 * have different indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
extern void user_single_step_report(struct pt_regs *regs);
#else
static inline void user_single_step_report(struct pt_regs *regs)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 *
 * This is guaranteed to be invoked once before a task stops for ptrace and
 * may include arch-specific operations necessary prior to a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif
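
/*
 * Usage pattern (sketch of how the core signal code is expected to call
 * these two hooks around a ptrace stop):
 *
 *	if (arch_ptrace_stop_needed(exit_code, info)) {
 *		spin_unlock_irq(&current->sighand->siglock);
 *		arch_ptrace_stop(exit_code, info);
 *		spin_lock_irq(&current->sighand->siglock);
 *	}
 *
 * i.e. the siglock is only dropped for stops where the arch actually has
 * work to do.
 */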

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

/*
 * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
 * on *all* architectures; the only reason to have a per-arch definition
 * is optimisation.
 */
#ifndef signal_pt_regs
#define signal_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif

extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
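
/*
 * Example (sketch, modelled on the /proc/<pid>/syscall reader): a caller
 * fills a struct syscall_info for a target task with something like
 *
 *	struct syscall_info info;
 *
 *	if (task_current_syscall(task, &info))
 *		return -EAGAIN;
 *
 * where a nonzero return means the target could not be sampled (it was not
 * stopped or blocked), and on success info.data.nr, info.data.args[] and
 * info.sp describe the syscall the task is executing.
 */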

extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
#endif