/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 version
 *
 * Derived from "include/asm-i386/elf.h"
 */

#ifndef __ASMS390_ELF_H
#define __ASMS390_ELF_H

/* s390 relocations defined by the ABIs */
#define R_390_NONE		0	/* No reloc. */
#define R_390_8			1	/* Direct 8 bit. */
#define R_390_12		2	/* Direct 12 bit. */
#define R_390_16		3	/* Direct 16 bit. */
#define R_390_32		4	/* Direct 32 bit. */
#define R_390_PC32		5	/* PC relative 32 bit. */
#define R_390_GOT12		6	/* 12 bit GOT offset. */
#define R_390_GOT32		7	/* 32 bit GOT offset. */
#define R_390_PLT32		8	/* 32 bit PC relative PLT address. */
#define R_390_COPY		9	/* Copy symbol at runtime. */
#define R_390_GLOB_DAT		10	/* Create GOT entry. */
#define R_390_JMP_SLOT		11	/* Create PLT entry. */
#define R_390_RELATIVE		12	/* Adjust by program base. */
#define R_390_GOTOFF32		13	/* 32 bit offset to GOT. */
#define R_390_GOTPC		14	/* 32 bit PC rel. offset to GOT. */
#define R_390_GOT16		15	/* 16 bit GOT offset. */
#define R_390_PC16		16	/* PC relative 16 bit. */
#define R_390_PC16DBL		17	/* PC relative 16 bit shifted by 1. */
#define R_390_PLT16DBL		18	/* 16 bit PC rel. PLT shifted by 1. */
#define R_390_PC32DBL		19	/* PC relative 32 bit shifted by 1. */
#define R_390_PLT32DBL		20	/* 32 bit PC rel. PLT shifted by 1. */
#define R_390_GOTPCDBL		21	/* 32 bit PC rel. GOT shifted by 1. */
#define R_390_64		22	/* Direct 64 bit. */
#define R_390_PC64		23	/* PC relative 64 bit. */
#define R_390_GOT64		24	/* 64 bit GOT offset. */
#define R_390_PLT64		25	/* 64 bit PC relative PLT address. */
#define R_390_GOTENT		26	/* 32 bit PC rel. to GOT entry >> 1. */
#define R_390_GOTOFF16		27	/* 16 bit offset to GOT. */
#define R_390_GOTOFF64		28	/* 64 bit offset to GOT. */
#define R_390_GOTPLT12		29	/* 12 bit offset to jump slot. */
#define R_390_GOTPLT16		30	/* 16 bit offset to jump slot. */
#define R_390_GOTPLT32		31	/* 32 bit offset to jump slot. */
#define R_390_GOTPLT64		32	/* 64 bit offset to jump slot. */
#define R_390_GOTPLTENT		33	/* 32 bit rel. offset to jump slot. */
#define R_390_PLTOFF16		34	/* 16 bit offset from GOT to PLT. */
#define R_390_PLTOFF32		35	/* 32 bit offset from GOT to PLT. */
#define R_390_PLTOFF64		36	/* 64 bit offset from GOT to PLT. */
#define R_390_TLS_LOAD		37	/* Tag for load insn in TLS code. */
#define R_390_TLS_GDCALL	38	/* Tag for function call in general
					   dynamic TLS code. */
#define R_390_TLS_LDCALL	39	/* Tag for function call in local
					   dynamic TLS code. */
#define R_390_TLS_GD32		40	/* Direct 32 bit for general dynamic
					   thread local data. */
#define R_390_TLS_GD64		41	/* Direct 64 bit for general dynamic
					   thread local data. */
#define R_390_TLS_GOTIE12	42	/* 12 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_GOTIE32	43	/* 32 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_GOTIE64	44	/* 64 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_LDM32		45	/* Direct 32 bit for local dynamic
					   thread local data in LD code. */
#define R_390_TLS_LDM64		46	/* Direct 64 bit for local dynamic
					   thread local data in LD code. */
#define R_390_TLS_IE32		47	/* 32 bit address of GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_IE64		48	/* 64 bit address of GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_IEENT		49	/* 32 bit rel. offset to GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_LE32		50	/* 32 bit negated offset relative to
					   static TLS block. */
#define R_390_TLS_LE64		51	/* 64 bit negated offset relative to
					   static TLS block. */
#define R_390_TLS_LDO32		52	/* 32 bit offset relative to TLS
					   block. */
#define R_390_TLS_LDO64		53	/* 64 bit offset relative to TLS
					   block. */
#define R_390_TLS_DTPMOD	54	/* ID of module containing symbol. */
#define R_390_TLS_DTPOFF	55	/* Offset in TLS block. */
#define R_390_TLS_TPOFF		56	/* Negated offset in static TLS
					   block. */
#define R_390_20		57	/* Direct 20 bit. */
#define R_390_GOT20		58	/* 20 bit GOT offset. */
#define R_390_GOTPLT20		59	/* 20 bit offset to jump slot. */
#define R_390_TLS_GOTIE20	60	/* 20 bit GOT offset for static TLS
					   block offset. */
/* Keep this the last entry. */
#define R_390_NUM	61
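/*
 * The list above only assigns numbers and short descriptions to the
 * relocation types; it says nothing about how they are applied.  Purely as
 * an illustration (this is not the kernel's module loader, and the helper
 * name apply_s390_reloc() is hypothetical), the sketch below patches three
 * common flavours following the usual s390 ELF ABI convention of
 * S = symbol value, A = addend, P = address of the patched field.  The
 * "*DBL" types store the offset divided by two because s390 branch
 * instructions count halfwords.
 *
 *	#include <elf.h>	// R_390_* values (glibc also carries them)
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	// val = S + A, pc = P; loc points at the field to patch.
 *	static void apply_s390_reloc(uint32_t type, void *loc,
 *				     uint64_t val, uint64_t pc)
 *	{
 *		switch (type) {
 *		case R_390_32: {			// direct 32 bit
 *			uint32_t v = (uint32_t)val;
 *			memcpy(loc, &v, sizeof(v));
 *			break;
 *		}
 *		case R_390_64:				// direct 64 bit
 *			memcpy(loc, &val, sizeof(val));
 *			break;
 *		case R_390_PC32DBL: {			// (S + A - P) >> 1
 *			int32_t v = (int32_t)((int64_t)(val - pc) >> 1);
 *			memcpy(loc, &v, sizeof(v));
 *			break;
 *		}
 *		}
 *	}
 */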
/* Bits present in AT_HWCAP. */
#define HWCAP_S390_ESAN3	1
#define HWCAP_S390_ZARCH	2
#define HWCAP_S390_STFLE	4
#define HWCAP_S390_MSA		8
#define HWCAP_S390_LDISP	16
#define HWCAP_S390_EIMM		32
#define HWCAP_S390_DFP		64
#define HWCAP_S390_HPAGE	128
#define HWCAP_S390_ETF3EH	256
#define HWCAP_S390_HIGH_GPRS	512
#define HWCAP_S390_TE		1024
#define HWCAP_S390_VXRS		2048
#define HWCAP_S390_VXRS_BCD	4096
#define HWCAP_S390_VXRS_EXT	8192
#define HWCAP_S390_GS		16384
#define HWCAP_S390_VXRS_EXT2	32768
#define HWCAP_S390_VXRS_PDE	65536
#define HWCAP_S390_SORT		131072
#define HWCAP_S390_DFLT		262144

/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE		1UL
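/*
 * Userspace does not include this header; it sees the same bit values
 * through the AT_HWCAP entry of the auxiliary vector.  A minimal,
 * illustrative userspace check (glibc typically exposes matching
 * HWCAP_S390_* constants via <sys/auxv.h>; the names used here simply
 * mirror the definitions above) could look like:
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>		// getauxval(), AT_HWCAP
 *
 *	int main(void)
 *	{
 *		unsigned long hwcap = getauxval(AT_HWCAP);
 *
 *		if (hwcap & HWCAP_S390_HIGH_GPRS)
 *			puts("full 64-bit GPRs available to 31-bit code");
 *		if (hwcap & HWCAP_S390_VXRS)
 *			puts("vector facility available");
 *		return 0;
 *	}
 */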
/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2MSB
#define ELF_ARCH	EM_S390

/* s390 specific phdr types */
#define PT_S390_PGSTE	0x70000000

/*
 * ELF register definitions.
 */

#include <linux/compat.h>

#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/user.h>

typedef s390_fp_regs elf_fpregset_t;
typedef s390_regs elf_gregset_t;

typedef s390_fp_regs compat_elf_fpregset_t;
typedef s390_compat_regs compat_elf_gregset_t;

#include <linux/sched/mm.h>	/* for task_struct */
#include <asm/mmu_context.h>

#include <asm/vdso.h>

extern unsigned int vdso_enabled;

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) \
	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_elf_check_arch(x) \
	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_start_thread	start_thread31

struct arch_elf_state {
	int rc;
};

#define INIT_ARCH_ELF_STATE { .rc = 0 }

#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0)
#ifdef CONFIG_PGSTE
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
({								\
	struct arch_elf_state *_state = state;			\
	if ((phdr)->p_type == PT_S390_PGSTE &&			\
	    !page_table_allocate_pgste &&			\
	    !test_thread_flag(TIF_PGSTE) &&			\
	    !current->mm->context.alloc_pgste) {		\
		set_thread_flag(TIF_PGSTE);			\
		set_pt_regs_flag(task_pt_regs(current),		\
				 PIF_SYSCALL_RESTART);		\
		_state->rc = -EAGAIN;				\
	}							\
	_state->rc;						\
})
#else
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
({								\
	(state)->rc;						\
})
#endif
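/*
 * arch_elf_pt_proc() above merely reacts to a PT_S390_PGSTE program header
 * found while loading a binary: it flags the thread and restarts the exec
 * so the new mm is created with page-table extensions.  Purely as an
 * illustration of what such a binary looks like from the other side, the
 * userspace sketch below scans a file's program headers for the same
 * p_type value (error handling trimmed; assumes a native 64-bit ELF read
 * on an s390 host, so no byte swapping is done):
 *
 *	#include <elf.h>	// Elf64_Ehdr, Elf64_Phdr
 *	#include <stdio.h>
 *
 *	// 0x70000000 mirrors PT_S390_PGSTE above (PT_LOPROC + 0).
 *	static int has_pgste_segment(FILE *f)
 *	{
 *		Elf64_Ehdr eh;
 *		Elf64_Phdr ph;
 *		int i;
 *
 *		if (fread(&eh, sizeof(eh), 1, f) != 1)
 *			return 0;
 *		for (i = 0; i < eh.e_phnum; i++) {
 *			if (fseek(f, eh.e_phoff + i * sizeof(ph), SEEK_SET) ||
 *			    fread(&ph, sizeof(ph), 1, f) != 1)
 *				return 0;
 *			if (ph.p_type == 0x70000000)	// PT_S390_PGSTE
 *				return 1;
 *		}
 *		return 0;
 *	}
 */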
/* For SVR4/S390 the function pointer to be registered with `atexit` is
   passed in R14. */
#define ELF_PLAT_INIT(_r, load_addr) \
	do { \
		_r->gprs[14] = 0; \
	} while (0)

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
   use of this is to invoke "./ld.so someprog" to test out a new version of
   the loader.  We need to make sure that it is out of the way of the program
   that it will "exec", and that there is sufficient room for the brk.  64-bit
   tasks are aligned to 4GB. */
#define ELF_ET_DYN_BASE (is_compat_task() ? \
				(STACK_TOP / 3 * 2) : \
				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports. */

extern unsigned long elf_hwcap;
#define ELF_HWCAP (elf_hwcap)

/* Internal hardware capabilities, not exposed via elf */

extern unsigned long int_hwcap;

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization.  This is more specific in
   intent than poking at uname or /proc/cpuinfo.

   For the moment, we have only optimizations for particular machine
   generations, but that could change... */

#define ELF_PLATFORM_SIZE 8
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)

#ifndef CONFIG_COMPAT
#define SET_PERSONALITY(ex)					\
do {								\
	set_personality(PER_LINUX |				\
		(current->personality & (~PER_MASK)));		\
	current->thread.sys_call_table =			\
		(unsigned long) &sys_call_table;		\
} while (0)
#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex)					\
do {								\
	if (personality(current->personality) != PER_LINUX32)	\
		set_personality(PER_LINUX |			\
			(current->personality & ~PER_MASK));	\
	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) {		\
		set_thread_flag(TIF_31BIT);			\
		current->thread.sys_call_table =		\
			(unsigned long) &sys_call_table_emu;	\
	} else {						\
		clear_thread_flag(TIF_31BIT);			\
		current->thread.sys_call_table =		\
			(unsigned long) &sys_call_table;	\
	}							\
} while (0)
#endif /* CONFIG_COMPAT */

/*
 * Cache aliasing on the latest machines calls for a mapping granularity
 * of 512KB for the anonymous mapping base. For 64-bit processes use a
 * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
 * the virtual address space is limited, use no alignment and limit the
 * randomization to 8MB.
 * For the additional randomization of the program break use 32MB for
 * 64-bit and 8MB for 31-bit.
 */
#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x1fffUL)
#define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK	MMAP_RND_MASK

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO							    \
do {									    \
	if (vdso_enabled)						    \
		NEW_AUX_ENT(AT_SYSINFO_EHDR,				    \
			    (unsigned long)current->mm->context.vdso_base); \
} while (0)

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);

#endif
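/*
 * Closing illustration: ARCH_DLINFO above is what places the vDSO base
 * address into each new process's auxiliary vector (when vdso_enabled is
 * set).  Userspace can read that entry back with getauxval(); treating the
 * value as an ELF header, as ld.so does, is sketched below and is only an
 * example, not part of this header.
 *
 *	#include <elf.h>
 *	#include <stdio.h>
 *	#include <sys/auxv.h>		// getauxval(), AT_SYSINFO_EHDR
 *
 *	int main(void)
 *	{
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (vdso) {
 *			const Elf64_Ehdr *eh = (const Elf64_Ehdr *)vdso;
 *			printf("vDSO mapped at %#lx, %u program headers\n",
 *			       vdso, (unsigned int)eh->e_phnum);
 *		}
 *		return 0;
 *	}
 */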