/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/ptrace.h
 *
 * Copyright (C) 1996-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PTRACE_H
#define __ASM_PTRACE_H

#include <asm/cpufeature.h>

#include <uapi/asm/ptrace.h>

/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1		(1 << 2)
#define CurrentEL_EL2		(2 << 2)

#define INIT_PSTATE_EL1 \
	(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h)
#define INIT_PSTATE_EL2 \
	(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)

/*
 * PMR values used to mask/unmask interrupts.
 *
 * GIC priority masking works as follows: if an IRQ's priority is a higher value
 * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
 * means masking more IRQs (or at least that the same IRQs remain masked).
 *
 * To mask interrupts, we clear the most significant bit of PMR.
 *
 * Some code sections either automatically switch back to PSR.I or explicitly
 * require that priority masking is not used. If the GIC_PRIO_PSR_I_SET bit is
 * included in the priority mask, it indicates that PSR.I should be set and
 * that interrupt disabling temporarily does not rely on IRQ priorities.
 */
#define GIC_PRIO_IRQON			0xe0
#define __GIC_PRIO_IRQOFF		(GIC_PRIO_IRQON & ~0x80)
#define __GIC_PRIO_IRQOFF_NS		0xa0
#define GIC_PRIO_PSR_I_SET		(1 << 4)

#define GIC_PRIO_IRQOFF							\
	({								\
		extern struct static_key_false gic_nonsecure_priorities;\
		u8 __prio = __GIC_PRIO_IRQOFF;				\
									\
		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
			__prio = __GIC_PRIO_IRQOFF_NS;			\
									\
		__prio;							\
	})
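
/*
 * Illustrative sketch only: per the comment above, a PMR value signals that
 * PSR.I-based masking is in use when GIC_PRIO_PSR_I_SET is included in it.
 * The macro below merely restates that test; its name is hypothetical and it
 * is not used elsewhere.
 */
#define __GIC_PRIO_USES_PSR_I(pmr)	(!!((pmr) & GIC_PRIO_PSR_I_SET))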

/* Additional SPSR bits not exposed in the UABI */
#define PSR_MODE_THREAD_BIT	(1 << 0)
#define PSR_IL_BIT		(1 << 20)

/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS		12
#define COMPAT_PTRACE_SETREGS		13
#define COMPAT_PTRACE_GET_THREAD_AREA	22
#define COMPAT_PTRACE_SET_SYSCALL	23
#define COMPAT_PTRACE_GETVFPREGS	27
#define COMPAT_PTRACE_SETVFPREGS	28
#define COMPAT_PTRACE_GETHBPREGS	29
#define COMPAT_PTRACE_SETHBPREGS	30

/* SPSR_ELx bits for exceptions taken from AArch32 */
#define PSR_AA32_MODE_MASK	0x0000001f
#define PSR_AA32_MODE_USR	0x00000010
#define PSR_AA32_MODE_FIQ	0x00000011
#define PSR_AA32_MODE_IRQ	0x00000012
#define PSR_AA32_MODE_SVC	0x00000013
#define PSR_AA32_MODE_ABT	0x00000017
#define PSR_AA32_MODE_HYP	0x0000001a
#define PSR_AA32_MODE_UND	0x0000001b
#define PSR_AA32_MODE_SYS	0x0000001f
#define PSR_AA32_T_BIT		0x00000020
#define PSR_AA32_F_BIT		0x00000040
#define PSR_AA32_I_BIT		0x00000080
#define PSR_AA32_A_BIT		0x00000100
#define PSR_AA32_E_BIT		0x00000200
#define PSR_AA32_PAN_BIT	0x00400000
#define PSR_AA32_SSBS_BIT	0x00800000
#define PSR_AA32_DIT_BIT	0x01000000
#define PSR_AA32_Q_BIT		0x08000000
#define PSR_AA32_V_BIT		0x10000000
#define PSR_AA32_C_BIT		0x20000000
#define PSR_AA32_Z_BIT		0x40000000
#define PSR_AA32_N_BIT		0x80000000
#define PSR_AA32_IT_MASK	0x0600fc00	/* If-Then execution state mask */
#define PSR_AA32_GE_MASK	0x000f0000

#ifdef CONFIG_CPU_BIG_ENDIAN
#define PSR_AA32_ENDSTATE	PSR_AA32_E_BIT
#else
#define PSR_AA32_ENDSTATE	0
#endif

/* AArch32 CPSR bits, as seen in AArch32 */
#define COMPAT_PSR_DIT_BIT	0x00200000

/*
 * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
 * process is located in memory.
 */
#define COMPAT_PT_TEXT_ADDR		0x10000
#define COMPAT_PT_DATA_ADDR		0x10004
#define COMPAT_PT_TEXT_END_ADDR		0x10008

/*
 * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing
 * a syscall -- i.e., its most recent entry into the kernel from
 * userspace was not via SVC, or otherwise a tracer cancelled the syscall.
 *
 * This must have the value -1, for ABI compatibility with ptrace etc.
 */
#define NO_SYSCALL (-1)

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>

/* sizeof(struct user) for AArch32 */
#define COMPAT_USER_SZ	296

/* Architecturally defined mapping between AArch32 and AArch64 registers */
#define compat_usr(x)	regs[(x)]
#define compat_fp	regs[11]
#define compat_sp	regs[13]
#define compat_lr	regs[14]
#define compat_sp_hyp	regs[15]
#define compat_lr_irq	regs[16]
#define compat_sp_irq	regs[17]
#define compat_lr_svc	regs[18]
#define compat_sp_svc	regs[19]
#define compat_lr_abt	regs[20]
#define compat_sp_abt	regs[21]
#define compat_lr_und	regs[22]
#define compat_sp_und	regs[23]
#define compat_r8_fiq	regs[24]
#define compat_r9_fiq	regs[25]
#define compat_r10_fiq	regs[26]
#define compat_r11_fiq	regs[27]
#define compat_r12_fiq	regs[28]
#define compat_sp_fiq	regs[29]
#define compat_lr_fiq	regs[30]

static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
{
	unsigned long pstate;

	pstate = psr & ~COMPAT_PSR_DIT_BIT;

	if (psr & COMPAT_PSR_DIT_BIT)
		pstate |= PSR_AA32_DIT_BIT;

	return pstate;
}

static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
{
	unsigned long psr;

	psr = pstate & ~PSR_AA32_DIT_BIT;

	if (pstate & PSR_AA32_DIT_BIT)
		psr |= COMPAT_PSR_DIT_BIT;

	return psr;
}

/*
 * This struct defines the way the registers are stored on the stack during an
 * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
 * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs.
 */
struct pt_regs {
	union {
		struct user_pt_regs user_regs;
		struct {
			u64 regs[31];
			u64 sp;
			u64 pc;
			u64 pstate;
		};
	};
	u64 orig_x0;
#ifdef __AARCH64EB__
	u32 unused2;
	s32 syscallno;
#else
	s32 syscallno;
	u32 unused2;
#endif

	u64 orig_addr_limit;
	/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
	u64 pmr_save;
	u64 stackframe[2];

	/* Only valid for some EL1 exceptions. */
	u64 lockdep_hardirqs;
	u64 exit_rcu;
};
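
/*
 * Minimal compile-time sketch of the layout constraints documented above:
 * the frame must stay 16-byte aligned and user_pt_regs must remain a prefix
 * of pt_regs. Placing the asserts here is illustrative only; they simply
 * restate what the comment above requires.
 */
static_assert(sizeof(struct pt_regs) % 16 == 0);
static_assert(offsetof(struct pt_regs, user_regs) == 0);
static_assert(offsetof(struct user_pt_regs, pstate) ==
	      offsetof(struct pt_regs, pstate));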

static inline bool in_syscall(struct pt_regs const *regs)
{
	return regs->syscallno != NO_SYSCALL;
}

static inline void forget_syscall(struct pt_regs *regs)
{
	regs->syscallno = NO_SYSCALL;
}

#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)

#define arch_has_single_step()	(1)

#ifdef CONFIG_COMPAT
#define compat_thumb_mode(regs) \
	(((regs)->pstate & PSR_AA32_T_BIT))
#else
#define compat_thumb_mode(regs) (0)
#endif

#define user_mode(regs)	\
	(((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)

#define compat_user_mode(regs)	\
	(((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \
	 (PSR_MODE32_BIT | PSR_MODE_EL0t))

#define processor_mode(regs) \
	((regs)->pstate & PSR_MODE_MASK)

#define irqs_priority_unmasked(regs)					\
	(system_uses_irq_prio_masking() ?				\
		(regs)->pmr_save == GIC_PRIO_IRQON :			\
		true)

#define interrupts_enabled(regs)					\
	(!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))

#define fast_interrupts_enabled(regs) \
	(!((regs)->pstate & PSR_F_BIT))

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	if (compat_user_mode(regs))
		return regs->compat_sp;
	return regs->sp;
}

extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
					       unsigned int n);

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read
 * @offset:	offset of the register within struct pt_regs
 *
 * regs_get_register returns the value of the register located at offset
 * @offset within @regs. If @offset is bigger than MAX_REG_OFFSET, this
 * returns 0.
 */
static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	u64 val = 0;

	WARN_ON(offset & 7);

	offset >>= 3;
	switch (offset) {
	case 0 ... 30:
		val = regs->regs[offset];
		break;
	case offsetof(struct pt_regs, sp) >> 3:
		val = regs->sp;
		break;
	case offsetof(struct pt_regs, pc) >> 3:
		val = regs->pc;
		break;
	case offsetof(struct pt_regs, pstate) >> 3:
		val = regs->pstate;
		break;
	default:
		val = 0;
	}

	return val;
}
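
/*
 * Minimal usage sketch: look a register up by name and read it through its
 * pt_regs offset, pairing regs_query_register_offset() with
 * regs_get_register(). The helper name is hypothetical, and it assumes
 * regs_query_register_offset() returns a negative value for an unknown
 * register name.
 */
static inline u64 __regs_read_by_name(struct pt_regs *regs, const char *name)
{
	int offset = regs_query_register_offset(name);

	if (offset < 0)
		return 0;	/* unknown register name */

	return regs_get_register(regs, offset);
}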

/*
 * Read a register given an architectural register index r.
 * This handles the common case where 31 means XZR, not SP.
 */
static inline unsigned long pt_regs_read_reg(const struct pt_regs *regs, int r)
{
	return (r == 31) ? 0 : regs->regs[r];
}

/*
 * Write a register given an architectural register index r.
 * This handles the common case where 31 means XZR, not SP.
 */
static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
				     unsigned long val)
{
	if (r != 31)
		regs->regs[r] = val;
}

/* Valid only for kernel-mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	unsigned long val = regs->regs[0];

	/*
	 * Audit currently uses regs_return_value() instead of
	 * syscall_get_return_value(). Apply the same sign-extension here until
	 * audit is updated to use syscall_get_return_value().
	 */
	if (compat_user_mode(regs))
		val = sign_extend64(val, 31);

	return val;
}

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->regs[0] = rc;
}

/**
 * regs_get_kernel_argument() - get Nth function argument in kernel
 * @regs:	pt_regs of that context
 * @n:		function argument number (starting from 0)
 *
 * regs_get_kernel_argument() returns the @n'th argument of the function call.
 *
 * Note that this chooses the most likely register mapping. In very rare
 * cases this may not return correct data, for example, if one of the
 * function parameters is 16 bytes or bigger. In such cases, we cannot
 * access the parameter correctly and the register assignment of
 * subsequent parameters will be shifted.
 */
static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
						     unsigned int n)
{
#define NR_REG_ARGUMENTS 8
	if (n < NR_REG_ARGUMENTS)
		return pt_regs_read_reg(regs, n);
	return 0;
}
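
/*
 * Minimal usage sketch: on arm64 the first eight integer arguments are
 * passed in x0-x7, so the helper above simply reads regs->regs[n] for
 * n < 8 and returns 0 for anything that would have been passed on the
 * stack. The wrapper below (hypothetical name, illustrative only) pulls
 * out the first two arguments of a trapped call.
 */
static inline void __regs_get_first_two_args(struct pt_regs *regs,
					     unsigned long *a0,
					     unsigned long *a1)
{
	*a0 = regs_get_kernel_argument(regs, 0);	/* x0 */
	*a1 = regs_get_kernel_argument(regs, 1);	/* x1 */
}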

/* We must avoid circular header include via sched.h */
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->pc;
}

static inline void instruction_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	regs->pc = val;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->regs[29];
}

#define procedure_link_pointer(regs)	((regs)->regs[30])

static inline void procedure_link_pointer_set(struct pt_regs *regs,
					      unsigned long val)
{
	procedure_link_pointer(regs) = val;
}

extern unsigned long profile_pc(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif