/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AT_SYSINFO entry point
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
	ALIGN
__kernel_vsyscall:
	CFI_STARTPROC
	/*
	 * Reshuffle regs so that any of the entry instructions will
	 * preserve enough state.
	 *
	 * A really nice entry sequence would be:
	 *	pushl %edx
	 *	pushl %ecx
	 *	movl %esp, %ecx
	 *
	 * Unfortunately, naughty Android versions between July and December
	 * 2015 actually hardcode the traditional Linux SYSENTER entry
	 * sequence.  That is severely broken for a number of reasons (ask
	 * anyone with an AMD CPU, for example).  Nonetheless, we try to keep
	 * it working approximately as well as it ever worked.
	 *
	 * This link may elucidate some of the history:
	 *   https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
	 * Personally, I find it hard to understand what's going on there.
	 *
	 * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE.
	 * Execute an indirect call to the address in the AT_SYSINFO auxv
	 * entry.  That is the ONLY correct way to make a fast 32-bit system
	 * call on Linux.  (Open-coding int $0x80 is also fine, but it's
	 * slow.)  A sketch of such an indirect call appears in the comment
	 * at the end of this file.
	 */
	pushl	%ecx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ecx, 0
	pushl	%edx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		edx, 0
	pushl	%ebp
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ebp, 0

	/*
	 * SYSENTER does not save the user stack pointer, so it is passed to
	 * the kernel in %ebp; SYSCALL clobbers %ecx (the return address is
	 * stored there), so arg2 is stashed in %ebp for the kernel to
	 * recover.
	 */
	#define SYSENTER_SEQUENCE	"movl %esp, %ebp; sysenter"
	#define SYSCALL_SEQUENCE	"movl %ecx, %ebp; syscall"

#ifdef CONFIG_X86_64
	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
			  SYSCALL_SEQUENCE,  X86_FEATURE_SYSCALL32
#else
	ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
#endif

	/* Enter using int $0x80 */
	int	$0x80
SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)

	/*
	 * Restore EDX and ECX in case they were clobbered.  EBP is not
	 * clobbered (the kernel restores it), but it's cleaner and
	 * probably faster to pop it than to adjust ESP using addl.
	 */
	popl	%ebp
	CFI_RESTORE		ebp
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%edx
	CFI_RESTORE		edx
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%ecx
	CFI_RESTORE		ecx
	CFI_ADJUST_CFA_OFFSET	-4
	RET
	CFI_ENDPROC

	.size __kernel_vsyscall,.-__kernel_vsyscall
	.previous
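
/*
 * For illustration only: a minimal userspace sketch of the indirect
 * AT_SYSINFO call recommended in the comment above.  It is not part of the
 * vDSO; it assumes a 32-bit build, glibc's getauxval(), and a hypothetical
 * wrapper name (vdso_getpid).
 *
 *	#include <sys/auxv.h>		// getauxval(), AT_SYSINFO
 *	#include <asm/unistd.h>		// __NR_getpid
 *
 *	static long vdso_getpid(void)
 *	{
 *		long ret;
 *		unsigned long vsys = getauxval(AT_SYSINFO);
 *
 *		// %eax carries the syscall number in and the result out;
 *		// arguments would go in %ebx/%ecx/%edx/%esi/%edi/%ebp,
 *		// exactly as for int $0x80.
 *		asm volatile ("call *%[vsys]"
 *			      : "=a" (ret)
 *			      : "0" (__NR_getpid), [vsys] "rm" (vsys)
 *			      : "memory", "cc");
 *		return ret;
 *	}
 *
 * Falling back to int $0x80 when getauxval(AT_SYSINFO) returns 0 is left to
 * the caller.
 */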