/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.
 *
 * This should only contain volatile regs, since we can keep the
 * non-volatile regs in the thread_struct; it should be set up so that
 * only the volatiles are saved here by the interrupt code.
 *
 * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
 * that the overall structure is a multiple of 16 bytes in length.
 *
 * Note that the offsets of the fields in this struct correspond with
 * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
 */
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H

#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
struct pt_regs
{
	union {
		struct user_pt_regs user_regs;
		struct {
			unsigned long gpr[32];
			unsigned long nip;
			unsigned long msr;
			unsigned long orig_gpr3;
			unsigned long ctr;
			unsigned long link;
			unsigned long xer;
			unsigned long ccr;
#ifdef CONFIG_PPC64
			unsigned long softe;
#else
			unsigned long mq;
#endif
			unsigned long trap;
			unsigned long dar;
			unsigned long dsisr;
			unsigned long result;
		};
	};

	union {
		struct {
#ifdef CONFIG_PPC64
			unsigned long ppr;
#endif
#ifdef CONFIG_PPC_KUAP
			unsigned long kuap;
#endif
		};
		unsigned long __pad[2];	/* Maintain 16 byte interrupt stack alignment */
	};
};
#endif
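
/*
 * Illustrative sketch (an assumption for documentation purposes, not a
 * check performed by this header): the comment at the top says the field
 * offsets above line up with the uapi PT_* indices, so a consistency
 * check in C would look something like
 *
 *	BUILD_BUG_ON(offsetof(struct pt_regs, nip) != PT_NIP * sizeof(long));
 *
 * arch/powerpc/kernel/ptrace relies on this correspondence when copying
 * registers to and from userspace.
 */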


#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))

#ifdef __powerpc64__

/*
 * Size of redzone that userspace is allowed to use below the stack
 * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
 * the new ELFv2 little-endian ABI, so we allow the larger amount.
 *
 * For kernel code we allow a 288-byte redzone, in order to conserve
 * kernel stack space; gcc currently only uses 288 bytes, and will
 * hopefully allow explicit control of the redzone size in future.
 */
#define USER_REDZONE_SIZE 512
#define KERNEL_REDZONE_SIZE 288

#define STACK_FRAME_OVERHEAD 112	/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2		/* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
			      STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12

#ifdef PPC64_ELF_ABI_v2
#define STACK_FRAME_MIN_SIZE 32
#else
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
#endif

/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 128
#define __SIGNAL_FRAMESIZE32 64

#else /* __powerpc64__ */

#define USER_REDZONE_SIZE 0
#define KERNEL_REDZONE_SIZE 0
#define STACK_FRAME_OVERHEAD 16		/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1		/* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER 2
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD

/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 64

#endif /* __powerpc64__ */

#ifndef __ASSEMBLY__

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->nip;
}

static inline void instruction_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	regs->nip = val;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->gpr[1];
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return 0;
}

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
#endif

#define force_successful_syscall_return()	\
	do {					\
		set_thread_flag(TIF_NOERROR);	\
	} while(0)

struct task_struct;
extern int ptrace_get_reg(struct task_struct *task, int regno,
			  unsigned long *data);
extern int ptrace_put_reg(struct task_struct *task, int regno,
			  unsigned long data);

#define current_pt_regs() \
	((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)

#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3S
#define TRAP_FLAGS_MASK		0x10
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		true
#define SET_FULL_REGS(regs)	do { } while (0)
#else
#define TRAP_FLAGS_MASK		0x11
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
#endif
#define CHECK_FULL_REGS(regs)	BUG_ON(!FULL_REGS(regs))
#define NV_REG_POISON		0xdeadbeefdeadbeefUL
#else
/*
 * We use the least-significant bit of the trap field to indicate
 * whether we have saved the full set of registers, or only a
 * partial set.  A 1 there means the partial set.
 * On 4xx we use the next bit to indicate whether the exception
 * is a critical exception (1 means it is).
 */
#define TRAP_FLAGS_MASK		0x1F
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
#define NV_REG_POISON		0xdeadbeef
#define CHECK_FULL_REGS(regs)						\
do {									\
	if ((regs)->trap & 1)						\
		printk(KERN_CRIT "%s: partial register set\n", __func__); \
} while (0)
#endif /* __powerpc64__ */
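
/*
 * Illustrative sketch (hypothetical usage, not code from this file):
 * a consumer that needs the non-volatile GPRs would typically gate on
 * the flag bits above before trusting them:
 *
 *	CHECK_FULL_REGS(regs);
 *	if (TRAP(regs) == 0x700)
 *		handle_program_check(regs);
 *
 * handle_program_check() is a made-up name here; the point is that
 * TRAP() masks out the flag bits so the comparison sees only the
 * exception vector.
 */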

static inline void set_trap(struct pt_regs *regs, unsigned long val)
{
	regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
}

static inline bool trap_is_scv(struct pt_regs *regs)
{
	return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
}

static inline bool trap_is_syscall(struct pt_regs *regs)
{
	return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
}

static inline bool trap_norestart(struct pt_regs *regs)
{
	return regs->trap & 0x10;
}

static inline void set_trap_norestart(struct pt_regs *regs)
{
	regs->trap |= 0x10;
}

#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
	if (trap_is_scv(regs))
		return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
	else
		return !(regs->ccr & 0x10000000);
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (trap_is_scv(regs))
		return regs->gpr[3];

	if (is_syscall_success(regs))
		return regs->gpr[3];
	else
		return -regs->gpr[3];
}

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->gpr[3] = rc;
}
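
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): a syscall-exit tracer could combine the helpers above to read
 * back a syscall's return value only when the frame really is a
 * syscall trap.  The name example_syscall_retval() is an assumption
 * for illustration only.
 */
static inline long example_syscall_retval(struct pt_regs *regs)
{
	if (!trap_is_syscall(regs))
		return 0;	/* not a syscall entry/exit frame */
	return regs_return_value(regs);
}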

#define arch_has_single_step()	(1)
#define arch_has_block_step()	(true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT

/*
 * kprobe-based event tracer support
 */

#include <linux/stddef.h>
#include <linux/thread_info.h>
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which register value is gotten
 * @offset:	offset number of the register.
 *
 * regs_get_register returns the value of the register at the given
 * @offset within @regs.  The @offset is the byte offset of the register
 * in struct pt_regs.  If @offset is bigger than MAX_REG_OFFSET,
 * this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
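
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): a kprobe-event style fetcher might resolve a register by name
 * and then read it through regs_get_register().  The error convention
 * assumed here is that regs_query_register_offset() reports an unknown
 * name with a negative value.
 */
static inline unsigned long example_get_register_by_name(struct pt_regs *regs,
							 const char *name)
{
	int offset = regs_query_register_offset(name);

	if (offset < 0)		/* unknown register name */
		return 0;
	return regs_get_register(regs, offset);
}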

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static inline bool regs_within_kernel_stack(struct pt_regs *regs,
					    unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
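
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): a tracer could use regs_get_kernel_stack_nth() to peek at the
 * back chain word of the frame that r1 currently points at; slot 0 of
 * a conventional powerpc stack frame holds the back chain pointer, and
 * an out-of-stack access simply yields 0.
 */
static inline unsigned long example_back_chain(struct pt_regs *regs)
{
	/* slot 0 of a stack frame is the back chain to the caller's frame */
	return regs_get_kernel_stack_nth(regs, 0);
}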

#endif /* __ASSEMBLY__ */

#ifndef __powerpc64__
/* We need PT_SOFTE defined at all times to avoid #ifdefs */
#define PT_SOFTE PT_MQ
#else /* __powerpc64__ */
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1)	/* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164		/* each Vector reg occupies 4 slots in 32-bit */
#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
#define PT_VSR0_32 300		/* each VSR reg occupies 4 slots in 32-bit */
#endif /* __powerpc64__ */
#endif /* _ASM_POWERPC_PTRACE_H */