1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_X86_PTRACE_H
3*4882a593Smuzhiyun #define _ASM_X86_PTRACE_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <asm/segment.h>
6*4882a593Smuzhiyun #include <asm/page_types.h>
7*4882a593Smuzhiyun #include <uapi/asm/ptrace.h>
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #ifndef __ASSEMBLY__
10*4882a593Smuzhiyun #ifdef __i386__
11*4882a593Smuzhiyun
struct pt_regs {
	/*
	 * NB: 32-bit x86 CPUs are inconsistent as what happens in the
	 * following cases (where %seg represents a segment register):
	 *
	 * - pushl %seg: some do a 16-bit write and leave the high
	 *   bits alone
	 * - movl %seg, [mem]: some do a 16-bit write despite the movl
	 * - IDT entry: some (e.g. 486) will leave the high bits of CS
	 *   and (if applicable) SS undefined.
	 *
	 * Fortunately, x86-32 doesn't read the high bits on POP or IRET,
	 * so we can just treat all of the segment registers as 16-bit
	 * values.
	 */
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	/*
	 * Each selector is stored as a 16-bit value followed by a
	 * 16-bit "__*h" pad that occupies the (unreliable) high half
	 * of the slot pushed on the stack -- see the comment above.
	 */
	unsigned short ds;
	unsigned short __dsh;
	unsigned short es;
	unsigned short __esh;
	unsigned short fs;
	unsigned short __fsh;
	/* On interrupt, gs and __gsh store the vector number. */
	unsigned short gs;
	unsigned short __gsh;
	/* On interrupt, this is the error code. */
	unsigned long orig_ax;
	/* Hardware exception/IRET frame follows. */
	unsigned long ip;
	unsigned short cs;
	unsigned short __csh;
	unsigned long flags;
	unsigned long sp;
	unsigned short ss;
	unsigned short __ssh;
};
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun #else /* __i386__ */
55*4882a593Smuzhiyun
struct pt_regs {
	/*
	 * C ABI says these regs are callee-preserved. They aren't saved on
	 * kernel entry unless syscall needs a complete, fully filled
	 * "struct pt_regs".
	 */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
	/* These regs are callee-clobbered. Always saved on kernel entry. */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	/*
	 * On syscall entry, this is syscall#. On CPU exception, this is
	 * error code. On hw interrupt, it's IRQ number:
	 */
	unsigned long orig_ax;
	/* Return frame for iretq */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
	/* top of stack page */
};
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun #endif /* !__i386__ */
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun #ifdef CONFIG_PARAVIRT
94*4882a593Smuzhiyun #include <asm/paravirt_types.h>
95*4882a593Smuzhiyun #endif
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun #include <asm/proto.h>
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun struct cpuinfo_x86;
100*4882a593Smuzhiyun struct task_struct;
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun extern unsigned long profile_pc(struct pt_regs *regs);
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun extern unsigned long
105*4882a593Smuzhiyun convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
106*4882a593Smuzhiyun extern void send_sigtrap(struct pt_regs *regs, int error_code, int si_code);
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun
/* Return value of a syscall/function lives in regs->ax on both 32 and 64 bit. */
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}
113*4882a593Smuzhiyun
/* Overwrite the (syscall) return value that will be seen by the tracee. */
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->ax = rc;
}
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun /*
120*4882a593Smuzhiyun * user_mode(regs) determines whether a register set came from user
121*4882a593Smuzhiyun * mode. On x86_32, this is true if V8086 mode was enabled OR if the
122*4882a593Smuzhiyun * register set was from protected mode with RPL-3 CS value. This
123*4882a593Smuzhiyun * tricky test checks that with one comparison.
124*4882a593Smuzhiyun *
125*4882a593Smuzhiyun * On x86_64, vm86 mode is mercifully nonexistent, and we don't need
126*4882a593Smuzhiyun * the extra check.
127*4882a593Smuzhiyun */
static __always_inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	/*
	 * Single comparison covering both cases (see comment above):
	 * either CS has RPL 3, or the VM flag is set (X86_VM_MASK is
	 * well above USER_RPL, so OR-ing it in forces the test true).
	 */
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
#else
	/* CPL is the low two bits of CS; any non-zero CPL means user mode. */
	return !!(regs->cs & 3);
#endif
}
136*4882a593Smuzhiyun
/* Non-zero if @regs was captured while the CPU was in virtual-8086 mode. */
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}
145*4882a593Smuzhiyun
/*
 * True if @regs came from userspace running in 64-bit (long) mode,
 * identified purely by the CS selector value.
 */
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
#ifndef CONFIG_PARAVIRT_XXL
	/*
	 * On non-paravirt systems, this is the only long mode CPL 3
	 * selector.  We do not allow long mode selectors in the LDT.
	 */
	return regs->cs == __USER_CS;
#else
	/* Headers are too twisted for this to go in paravirt.h. */
	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
#else /* !CONFIG_X86_64 */
	return false;
#endif
}
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun * Determine whether the register set came from any context that is running in
166*4882a593Smuzhiyun * 64-bit mode.
167*4882a593Smuzhiyun */
/*
 * Determine whether the register set came from any context that is running in
 * 64-bit mode: on a 64-bit kernel that is the kernel itself plus 64-bit
 * userspace; on a 32-bit kernel nothing qualifies.
 */
static inline bool any_64bit_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
	return !user_mode(regs) || user_64bit_mode(regs);
#else
	return false;
#endif
}
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun #ifdef CONFIG_X86_64
178*4882a593Smuzhiyun #define current_user_stack_pointer() current_pt_regs()->sp
179*4882a593Smuzhiyun #define compat_user_stack_pointer() current_pt_regs()->sp
180*4882a593Smuzhiyun
ip_within_syscall_gap(struct pt_regs * regs)181*4882a593Smuzhiyun static inline bool ip_within_syscall_gap(struct pt_regs *regs)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
184*4882a593Smuzhiyun regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack);
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun #ifdef CONFIG_IA32_EMULATION
187*4882a593Smuzhiyun ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
188*4882a593Smuzhiyun regs->ip < (unsigned long)entry_SYSCALL_compat_safe_stack);
189*4882a593Smuzhiyun #endif
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun return ret;
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun #endif
194*4882a593Smuzhiyun
/* Stack pointer at the time @regs was captured (kernel view). */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
199*4882a593Smuzhiyun
/* Instruction pointer at the time @regs was captured. */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->ip;
}
204*4882a593Smuzhiyun
/* Redirect execution: the context will resume at @val on return. */
static inline void instruction_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	regs->ip = val;
}
210*4882a593Smuzhiyun
/* Frame pointer (bp) at the time @regs was captured. */
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->bp;
}
215*4882a593Smuzhiyun
/* Stack pointer as seen by userspace in @regs. */
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
220*4882a593Smuzhiyun
/* Overwrite the user stack pointer that will be restored from @regs. */
static inline void user_stack_pointer_set(struct pt_regs *regs,
					  unsigned long val)
{
	regs->sp = val;
}
226*4882a593Smuzhiyun
/* True if interrupts were disabled (EFLAGS.IF clear) when @regs was saved. */
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
	return !(regs->flags & X86_EFLAGS_IF);
}
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun /* Query offset/name of register from its name/offset */
233*4882a593Smuzhiyun extern int regs_query_register_offset(const char *name);
234*4882a593Smuzhiyun extern const char *regs_query_register_name(unsigned int offset);
235*4882a593Smuzhiyun #define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun /**
238*4882a593Smuzhiyun * regs_get_register() - get register value from its offset
239*4882a593Smuzhiyun * @regs: pt_regs from which register value is gotten.
240*4882a593Smuzhiyun * @offset: offset number of the register.
241*4882a593Smuzhiyun *
242*4882a593Smuzhiyun * regs_get_register returns the value of a register. The @offset is the
243*4882a593Smuzhiyun * offset of the register in struct pt_regs address which specified by @regs.
244*4882a593Smuzhiyun * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
245*4882a593Smuzhiyun */
regs_get_register(struct pt_regs * regs,unsigned int offset)246*4882a593Smuzhiyun static inline unsigned long regs_get_register(struct pt_regs *regs,
247*4882a593Smuzhiyun unsigned int offset)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun if (unlikely(offset > MAX_REG_OFFSET))
250*4882a593Smuzhiyun return 0;
251*4882a593Smuzhiyun #ifdef CONFIG_X86_32
252*4882a593Smuzhiyun /* The selector fields are 16-bit. */
253*4882a593Smuzhiyun if (offset == offsetof(struct pt_regs, cs) ||
254*4882a593Smuzhiyun offset == offsetof(struct pt_regs, ss) ||
255*4882a593Smuzhiyun offset == offsetof(struct pt_regs, ds) ||
256*4882a593Smuzhiyun offset == offsetof(struct pt_regs, es) ||
257*4882a593Smuzhiyun offset == offsetof(struct pt_regs, fs) ||
258*4882a593Smuzhiyun offset == offsetof(struct pt_regs, gs)) {
259*4882a593Smuzhiyun return *(u16 *)((unsigned long)regs + offset);
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun #endif
263*4882a593Smuzhiyun return *(unsigned long *)((unsigned long)regs + offset);
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun /**
267*4882a593Smuzhiyun * regs_within_kernel_stack() - check the address in the stack
268*4882a593Smuzhiyun * @regs: pt_regs which contains kernel stack pointer.
269*4882a593Smuzhiyun * @addr: address which is checked.
270*4882a593Smuzhiyun *
271*4882a593Smuzhiyun * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
272*4882a593Smuzhiyun * If @addr is within the kernel stack, it returns true. If not, returns false.
273*4882a593Smuzhiyun */
regs_within_kernel_stack(struct pt_regs * regs,unsigned long addr)274*4882a593Smuzhiyun static inline int regs_within_kernel_stack(struct pt_regs *regs,
275*4882a593Smuzhiyun unsigned long addr)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun return ((addr & ~(THREAD_SIZE - 1)) == (regs->sp & ~(THREAD_SIZE - 1)));
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun /**
281*4882a593Smuzhiyun * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
282*4882a593Smuzhiyun * @regs: pt_regs which contains kernel stack pointer.
283*4882a593Smuzhiyun * @n: stack entry number.
284*4882a593Smuzhiyun *
285*4882a593Smuzhiyun * regs_get_kernel_stack_nth() returns the address of the @n th entry of the
286*4882a593Smuzhiyun * kernel stack which is specified by @regs. If the @n th entry is NOT in
287*4882a593Smuzhiyun * the kernel stack, this returns NULL.
288*4882a593Smuzhiyun */
regs_get_kernel_stack_nth_addr(struct pt_regs * regs,unsigned int n)289*4882a593Smuzhiyun static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun unsigned long *addr = (unsigned long *)regs->sp;
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun addr += n;
294*4882a593Smuzhiyun if (regs_within_kernel_stack(regs, (unsigned long)addr))
295*4882a593Smuzhiyun return addr;
296*4882a593Smuzhiyun else
297*4882a593Smuzhiyun return NULL;
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun /* To avoid include hell, we can't include uaccess.h */
301*4882a593Smuzhiyun extern long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun /**
304*4882a593Smuzhiyun * regs_get_kernel_stack_nth() - get Nth entry of the stack
305*4882a593Smuzhiyun * @regs: pt_regs which contains kernel stack pointer.
306*4882a593Smuzhiyun * @n: stack entry number.
307*4882a593Smuzhiyun *
308*4882a593Smuzhiyun * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
309*4882a593Smuzhiyun * is specified by @regs. If the @n th entry is NOT in the kernel stack
310*4882a593Smuzhiyun * this returns 0.
311*4882a593Smuzhiyun */
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * or the read faults, this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *entry = regs_get_kernel_stack_nth_addr(regs, n);
	unsigned long val;

	if (!entry)
		return 0;

	/* The entry may sit on another task's stack page; read safely. */
	if (copy_from_kernel_nofault(&val, entry, sizeof(val)))
		return 0;

	return val;
}
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun /**
329*4882a593Smuzhiyun * regs_get_kernel_argument() - get Nth function argument in kernel
330*4882a593Smuzhiyun * @regs: pt_regs of that context
331*4882a593Smuzhiyun * @n: function argument number (start from 0)
332*4882a593Smuzhiyun *
333*4882a593Smuzhiyun * regs_get_argument() returns @n th argument of the function call.
334*4882a593Smuzhiyun * Note that this chooses most probably assignment, in some case
335*4882a593Smuzhiyun * it can be incorrect.
336*4882a593Smuzhiyun * This is expected to be called from kprobes or ftrace with regs
337*4882a593Smuzhiyun * where the top of stack is the return address.
338*4882a593Smuzhiyun */
regs_get_kernel_argument(struct pt_regs * regs,unsigned int n)339*4882a593Smuzhiyun static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
340*4882a593Smuzhiyun unsigned int n)
341*4882a593Smuzhiyun {
342*4882a593Smuzhiyun static const unsigned int argument_offs[] = {
343*4882a593Smuzhiyun #ifdef __i386__
344*4882a593Smuzhiyun offsetof(struct pt_regs, ax),
345*4882a593Smuzhiyun offsetof(struct pt_regs, dx),
346*4882a593Smuzhiyun offsetof(struct pt_regs, cx),
347*4882a593Smuzhiyun #define NR_REG_ARGUMENTS 3
348*4882a593Smuzhiyun #else
349*4882a593Smuzhiyun offsetof(struct pt_regs, di),
350*4882a593Smuzhiyun offsetof(struct pt_regs, si),
351*4882a593Smuzhiyun offsetof(struct pt_regs, dx),
352*4882a593Smuzhiyun offsetof(struct pt_regs, cx),
353*4882a593Smuzhiyun offsetof(struct pt_regs, r8),
354*4882a593Smuzhiyun offsetof(struct pt_regs, r9),
355*4882a593Smuzhiyun #define NR_REG_ARGUMENTS 6
356*4882a593Smuzhiyun #endif
357*4882a593Smuzhiyun };
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun if (n >= NR_REG_ARGUMENTS) {
360*4882a593Smuzhiyun n -= NR_REG_ARGUMENTS - 1;
361*4882a593Smuzhiyun return regs_get_kernel_stack_nth(regs, n);
362*4882a593Smuzhiyun } else
363*4882a593Smuzhiyun return regs_get_register(regs, argument_offs[n]);
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun #define arch_has_single_step() (1)
367*4882a593Smuzhiyun #ifdef CONFIG_X86_DEBUGCTLMSR
368*4882a593Smuzhiyun #define arch_has_block_step() (1)
369*4882a593Smuzhiyun #else
370*4882a593Smuzhiyun #define arch_has_block_step() (boot_cpu_data.x86 >= 6)
371*4882a593Smuzhiyun #endif
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun #define ARCH_HAS_USER_SINGLE_STEP_REPORT
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun struct user_desc;
376*4882a593Smuzhiyun extern int do_get_thread_area(struct task_struct *p, int idx,
377*4882a593Smuzhiyun struct user_desc __user *info);
378*4882a593Smuzhiyun extern int do_set_thread_area(struct task_struct *p, int idx,
379*4882a593Smuzhiyun struct user_desc __user *info, int can_allocate);
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun #ifdef CONFIG_X86_64
382*4882a593Smuzhiyun # define do_set_thread_area_64(p, s, t) do_arch_prctl_64(p, s, t)
383*4882a593Smuzhiyun #else
384*4882a593Smuzhiyun # define do_set_thread_area_64(p, s, t) (0)
385*4882a593Smuzhiyun #endif
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun #endif /* !__ASSEMBLY__ */
388*4882a593Smuzhiyun #endif /* _ASM_X86_PTRACE_H */
389