xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/elf.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ELF_H
#define _ASM_X86_ELF_H

/*
 * ELF register definitions..
 */
#include <linux/thread_info.h>

#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>
#include <asm/fsgsbase.h>

typedef unsigned long elf_greg_t;

#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct user_i387_struct elf_fpregset_t;
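
/*
 * elf_gregset_t is sized from struct user_regs_struct, so ELF_NGREG is
 * 17 on i386 and 27 on x86-64 with the current layouts; the
 * ELF_CORE_COPY_REGS macros below fill those slots in the same order as
 * the fields of struct user_regs_struct.
 */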

#ifdef __i386__

#define R_386_NONE	0
#define R_386_32	1
#define R_386_PC32	2
#define R_386_GOT32	3
#define R_386_PLT32	4
#define R_386_COPY	5
#define R_386_GLOB_DAT	6
#define R_386_JMP_SLOT	7
#define R_386_RELATIVE	8
#define R_386_GOTOFF	9
#define R_386_GOTPC	10
#define R_386_NUM	11

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS32
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_386

#else

/* x86-64 relocation types */
#define R_X86_64_NONE		0	/* No reloc */
#define R_X86_64_64		1	/* Direct 64 bit  */
#define R_X86_64_PC32		2	/* PC relative 32 bit signed */
#define R_X86_64_GOT32		3	/* 32 bit GOT entry */
#define R_X86_64_PLT32		4	/* 32 bit PLT address */
#define R_X86_64_COPY		5	/* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT	6	/* Create GOT entry */
#define R_X86_64_JUMP_SLOT	7	/* Create PLT entry */
#define R_X86_64_RELATIVE	8	/* Adjust by program base */
#define R_X86_64_GOTPCREL	9	/* 32 bit signed pc relative
					   offset to GOT */
#define R_X86_64_32		10	/* Direct 32 bit zero extended */
#define R_X86_64_32S		11	/* Direct 32 bit sign extended */
#define R_X86_64_16		12	/* Direct 16 bit zero extended */
#define R_X86_64_PC16		13	/* 16 bit sign extended pc relative */
#define R_X86_64_8		14	/* Direct 8 bit sign extended  */
#define R_X86_64_PC8		15	/* 8 bit sign extended pc relative */
#define R_X86_64_PC64		24	/* Place relative 64-bit signed */

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_X86_64

#endif

#include <asm/vdso.h>

#ifdef CONFIG_X86_64
extern unsigned int vdso64_enabled;
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
extern unsigned int vdso32_enabled;
#endif

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch_ia32(x) \
	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))

#include <asm/processor.h>

#ifdef CONFIG_X86_32
#include <asm/desc.h>

#define elf_check_arch(x)	elf_check_arch_ia32(x)

/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
   contains a pointer to a function which might be registered using `atexit'.
   This provides a means for the dynamic linker to call DT_FINI functions for
   shared libraries that have been loaded before the code runs.

   A value of 0 tells us we have no such handler.

   We might as well make sure everything else is cleared too (except for %esp),
   just to make things more deterministic.
 */
#define ELF_PLAT_INIT(_r, load_addr)		\
	do {					\
	_r->bx = 0; _r->cx = 0; _r->dx = 0;	\
	_r->si = 0; _r->di = 0; _r->bp = 0;	\
	_r->ax = 0;				\
} while (0)
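
/*
 * fs/binfmt_elf.c invokes ELF_PLAT_INIT() on the new task's pt_regs just
 * before start_thread(), so a freshly exec'ed i386 process enters user
 * space with these registers zeroed.  Clearing %edx is what signals "no
 * registered DT_FINI handler" per the ABI note above; %esp and %eip are
 * set separately by start_thread().
 */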

/*
 * regs is struct pt_regs, pr_reg is elf_gregset_t (which is now
 * struct user_regs_struct; the two layouts are different).
 */

#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs)	\
do {						\
	pr_reg[0] = regs->bx;			\
	pr_reg[1] = regs->cx;			\
	pr_reg[2] = regs->dx;			\
	pr_reg[3] = regs->si;			\
	pr_reg[4] = regs->di;			\
	pr_reg[5] = regs->bp;			\
	pr_reg[6] = regs->ax;			\
	pr_reg[7] = regs->ds;			\
	pr_reg[8] = regs->es;			\
	pr_reg[9] = regs->fs;			\
	pr_reg[11] = regs->orig_ax;		\
	pr_reg[12] = regs->ip;			\
	pr_reg[13] = regs->cs;			\
	pr_reg[14] = regs->flags;		\
	pr_reg[15] = regs->sp;			\
	pr_reg[16] = regs->ss;			\
} while (0);

#define ELF_CORE_COPY_REGS(pr_reg, regs)	\
do {						\
	ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
	pr_reg[10] = get_user_gs(regs);		\
} while (0);

#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs)	\
do {						\
	ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
	savesegment(gs, pr_reg[10]);		\
} while (0);
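
/*
 * The slot order above follows the i386 struct user_regs_struct, in which
 * index 10 is %gs; that slot is filled by the two wrappers rather than by
 * the common macro.  ELF_CORE_COPY_REGS takes the user-visible %gs via
 * get_user_gs(), while ELF_CORE_COPY_KERNEL_REGS snapshots the live
 * selector with savesegment(), which is what matters when dumping
 * kernel-mode register state.
 */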

#define ELF_PLATFORM	(utsname()->machine)
#define set_personality_64bit()	do { } while (0)

#else /* CONFIG_X86_32 */

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x)			\
	((x)->e_machine == EM_X86_64)

#define compat_elf_check_arch(x)					\
	(elf_check_arch_ia32(x) ||					\
	 (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))

#if __USER32_DS != __USER_DS
# error "The following code assumes __USER32_DS == __USER_DS"
#endif

static inline void elf_common_init(struct thread_struct *t,
				   struct pt_regs *regs, const u16 ds)
{
	/* ax gets execve's return value. */
	/*regs->ax = */ regs->bx = regs->cx = regs->dx = 0;
	regs->si = regs->di = regs->bp = 0;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
	t->fsbase = t->gsbase = 0;
	t->fsindex = t->gsindex = 0;
	t->ds = t->es = ds;
}

#define ELF_PLAT_INIT(_r, load_addr)			\
	elf_common_init(&current->thread, _r, 0)

#define	COMPAT_ELF_PLAT_INIT(regs, load_addr)		\
	elf_common_init(&current->thread, regs, __USER_DS)

void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp);
#define compat_start_thread compat_start_thread

void set_personality_ia32(bool);
#define COMPAT_SET_PERSONALITY(ex)			\
	set_personality_ia32((ex).e_machine == EM_X86_64)

#define COMPAT_ELF_PLATFORM			("i686")

/*
 * regs is struct pt_regs, pr_reg is elf_gregset_t (which is now
 * struct user_regs_struct; the two layouts are different).  Assumes
 * current is the process getting dumped.
 */

#define ELF_CORE_COPY_REGS(pr_reg, regs)			\
do {								\
	unsigned v;						\
	(pr_reg)[0] = (regs)->r15;				\
	(pr_reg)[1] = (regs)->r14;				\
	(pr_reg)[2] = (regs)->r13;				\
	(pr_reg)[3] = (regs)->r12;				\
	(pr_reg)[4] = (regs)->bp;				\
	(pr_reg)[5] = (regs)->bx;				\
	(pr_reg)[6] = (regs)->r11;				\
	(pr_reg)[7] = (regs)->r10;				\
	(pr_reg)[8] = (regs)->r9;				\
	(pr_reg)[9] = (regs)->r8;				\
	(pr_reg)[10] = (regs)->ax;				\
	(pr_reg)[11] = (regs)->cx;				\
	(pr_reg)[12] = (regs)->dx;				\
	(pr_reg)[13] = (regs)->si;				\
	(pr_reg)[14] = (regs)->di;				\
	(pr_reg)[15] = (regs)->orig_ax;				\
	(pr_reg)[16] = (regs)->ip;				\
	(pr_reg)[17] = (regs)->cs;				\
	(pr_reg)[18] = (regs)->flags;				\
	(pr_reg)[19] = (regs)->sp;				\
	(pr_reg)[20] = (regs)->ss;				\
	(pr_reg)[21] = x86_fsbase_read_cpu();			\
	(pr_reg)[22] = x86_gsbase_read_cpu_inactive();		\
	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
	asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v;	\
} while (0);
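
/*
 * Slots 0-26 mirror the x86-64 struct user_regs_struct layout.  The FS/GS
 * bases and segment selectors are read straight from the CPU rather than
 * from pt_regs, which is only valid because, as noted above, current is
 * assumed to be the task being dumped.
 */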

/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM       ("x86_64")
extern void set_personality_64bit(void);
extern unsigned int sysctl_vsyscall32;
extern int force_personality32;

#endif /* !CONFIG_X86_32 */

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	4096

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is above 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
						  (DEFAULT_MAP_WINDOW / 3 * 2))
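
/*
 * For ia32 tasks this is the traditional 4 MiB text base (0x00400000);
 * for 64-bit tasks it is two thirds of DEFAULT_MAP_WINDOW, roughly 85 TiB
 * with the default 47-bit user address space, comfortably above 4GB as
 * the comment above requires.
 */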

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports.  This could be done in user space,
   but it's not easy, and we've already done it here.  */

#define ELF_HWCAP		(boot_cpu_data.x86_capability[CPUID_1_EDX])

extern u32 elf_hwcap2;

/*
 * HWCAP2 supplies a mask of kernel-enabled CPU features, so that
 * the application can discover that it can safely use them.
 * The bits are defined in uapi/asm/hwcap2.h.
 */
#define ELF_HWCAP2		(elf_hwcap2)

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization.  This is more specific in
   intent than poking at uname or /proc/cpuinfo.

   For the moment, we have only optimizations for the Intel generations,
   but that could change... */

#define SET_PERSONALITY(ex) set_personality_64bit()

/*
 * An executable for which elf_read_implies_exec() returns TRUE will
 * have the READ_IMPLIES_EXEC personality flag set automatically.
 *
 * The decision process for determining the results is:
 *
 *                 CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
 * ELF:                 |            |                  |                |
 * ---------------------|------------|------------------|----------------|
 * missing PT_GNU_STACK | exec-all   | exec-all         | exec-none      |
 * PT_GNU_STACK == RWX  | exec-stack | exec-stack       | exec-stack     |
 * PT_GNU_STACK == RW   | exec-none  | exec-none        | exec-none      |
 *
 *  exec-all  : all PROT_READ user mappings are executable, except when
 *              backed by files on a noexec-filesystem.
 *  exec-none : only PROT_EXEC user mappings are executable.
 *  exec-stack: only the stack and PROT_EXEC user mappings are executable.
 *
 *  *this column has no architectural effect: NX markings are ignored by
 *   hardware, but may have behavioral effects when "wants X" collides with
 *   "cannot be X" constraints in memory permission flags, as in
 *   https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
 *
 */
#define elf_read_implies_exec(ex, executable_stack)	\
	(mmap_is_ia32() && executable_stack == EXSTACK_DEFAULT)
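
/*
 * This implements the table above: only an ia32 task whose binary lacks a
 * PT_GNU_STACK header (EXSTACK_DEFAULT) gets READ_IMPLIES_EXEC; 64-bit
 * tasks and binaries that carry an explicit PT_GNU_STACK never do.
 */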

struct task_struct;

#define	ARCH_DLINFO_IA32						\
do {									\
	if (VDSO_CURRENT_BASE) {					\
		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);			\
		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);	\
	}								\
} while (0)
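
/*
 * These NEW_AUX_ENT()s land in the auxiliary vector of 32-bit binaries:
 * AT_SYSINFO is the __kernel_vsyscall entry point (see VDSO_ENTRY below)
 * and AT_SYSINFO_EHDR is the base of the mapped vDSO image, which the C
 * library uses to locate the vDSO functions and the fast system call
 * entry.
 */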

/*
 * True on X86_32 or when emulating IA32 on X86_64
 */
static inline int mmap_is_ia32(void)
{
	return IS_ENABLED(CONFIG_X86_32) ||
	       (IS_ENABLED(CONFIG_COMPAT) &&
		test_thread_flag(TIF_ADDR32));
}

extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);

#ifdef CONFIG_X86_32

#define __STACK_RND_MASK(is32bit) (0x7ff)
#define STACK_RND_MASK (0x7ff)

#define ARCH_DLINFO		ARCH_DLINFO_IA32

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */

#else /* CONFIG_X86_32 */

/* 1GB for 64bit, 8MB for 32bit */
#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
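
/*
 * The ELF loader randomizes the stack top by
 * (get_random_long() & STACK_RND_MASK) << PAGE_SHIFT, so the mask counts
 * pages of randomization: 0x7ff gives 8 MiB for 32-bit tasks, while
 * 0x3fffff gives 22 bits of page-granular randomization for 64-bit tasks.
 */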

#define ARCH_DLINFO							\
do {									\
	if (vdso64_enabled)						\
		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
			    (unsigned long __force)current->mm->context.vdso); \
} while (0)

/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
#define ARCH_DLINFO_X32							\
do {									\
	if (vdso64_enabled)						\
		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
			    (unsigned long __force)current->mm->context.vdso); \
} while (0)

#define AT_SYSINFO		32

#define COMPAT_ARCH_DLINFO						\
if (test_thread_flag(TIF_X32))						\
	ARCH_DLINFO_X32;						\
else									\
	ARCH_DLINFO_IA32

#define COMPAT_ELF_ET_DYN_BASE	(TASK_UNMAPPED_BASE + 0x1000000)

#endif /* !CONFIG_X86_32 */

#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)

#define VDSO_ENTRY							\
	((unsigned long)current->mm->context.vdso +			\
	 vdso_image_32.sym___kernel_vsyscall)

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);
extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
					      int uses_interp);
#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages

/* Do not change the values. See get_align_mask() */
enum align_flags {
	ALIGN_VA_32	= BIT(0),
	ALIGN_VA_64	= BIT(1),
};

struct va_alignment {
	int flags;
	unsigned long mask;
	unsigned long bits;
} ____cacheline_aligned;

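/*
 * va_align is consulted by the x86 get_unmapped_area() code: when the
 * ALIGN_VA_32/ALIGN_VA_64 flag matching the current task is set, mmap()
 * results are aligned according to 'mask' and offset by the per-boot
 * random 'bits' (set up by CPU-specific code, e.g. to avoid
 * instruction-cache aliasing on some AMD parts).
 */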
extern struct va_alignment va_align;
extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */