/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif

	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
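/*
 * Both variants test for pending work with "movs r1, r1, lsl #16",
 * which relies on all of the work-related TIF flags living in bits
 * 0-15 of thread_info::flags: the shift discards bits 16-31 and sets
 * the Z flag only when no work is pending.  A sketch of the explicit
 * equivalent (assuming that flag layout; 0xffff is not representable
 * as an ARM immediate, hence the shift trick):
 *
 *	ldr	r1, [tsk, #TI_FLAGS]
 *	ldr	r2, =0x0000ffff		@ mask of the work flags
 *	tst	r1, r2
 *	bne	fast_work_pending	@ (or slow_work_pending)
 */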
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace		@ enable interrupts
	mov	r0, sp			@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace		@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]
	movs	r1, r1, lsl #16
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
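/*
 * r4 and r5 are set up by copy_thread(): for a user fork r5 is zero
 * and we simply take the slow return path back to user space; for a
 * kernel thread r5 is assumed to hold the thread function and r4 its
 * argument (per the usual ARM copy_thread() convention), so we call
 * fn(arg) and only head back to user space, via 1f, if it returns.
 */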
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
ENTRY(vector_bhb_loop8_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mov	r8, #8
1:	b	2f
2:	subs	r8, r8, #1
	bne	1b
	dsb
	isb
	b	3f
ENDPROC(vector_bhb_loop8_swi)

	.align	5
ENTRY(vector_bhb_bpiall_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
	isb
	b	3f
ENDPROC(vector_bhb_bpiall_swi)
#endif
	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
3:
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
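	/*
	 * For reference, a sketch of the two user-space conventions
	 * being told apart here (illustrative only, not part of this
	 * file):
	 *
	 *	@ EABI: syscall number in r7, zero SWI comment field
	 *	mov	r7, #1			@ __NR_exit
	 *	swi	#0
	 *
	 *	@ OABI: syscall number encoded in the SWI comment field
	 *	swi	#0x900001		@ __NR_OABI_SYSCALL_BASE + 1
	 *
	 * Once the 0xff000000 condition/opcode bits are masked off
	 * below, a zero result therefore means EABI and anything else
	 * is an old ABI call.
	 */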
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
	get_thread_info tsk
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
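	/*
	 * The syscall argument frame is now complete.  A sketch of the
	 * layout seen by a six-argument handler (assuming the standard
	 * AAPCS convention of r0-r3 for the first four arguments):
	 *
	 *	r0-r3      args 1-4, passed in registers
	 *	[sp, #0]   arg 5 (saved r4)
	 *	[sp, #4]   arg 6 (saved r5)
	 *
	 * which is exactly where AAPCS-compiled C code expects stack
	 * arguments, so handlers need no further marshalling.
	 */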
	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
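	/*
	 * A note on the store below: the pre-indexed writeback in
	 *
	 *	str	r0, [sp, #S_R0 + S_OFF]!
	 *
	 * does two jobs at once: it saves the syscall return value into
	 * pt_regs->ARM_r0 (S_R0 being r0's offset within pt_regs) and
	 * advances sp past the two argument words pushed at
	 * local_restart, leaving sp back on the saved pt_regs.
	 */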
__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm

#define NATIVE(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI, a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
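/*
 * As an illustration of the macros above: an entry such as
 *
 *	NATIVE(1, sys_exit)
 *
 * in the generated call list expands to "syscall 1, sys_exit", which
 * first pads any gap since the previous entry with sys_ni_syscall and
 * then emits ".long sys_exit" at slot 1, so a syscall's table index
 * always equals its number.
 */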
	syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
#ifdef CONFIG_CPU_SPECTRE
	movhs	scno, #0
	csdb
#endif
	stmialo	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset, we return -EINVAL.
 */
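/*
 * A sketch of the matching EABI user-space call (illustrative only):
 *
 *	@ r0 = addr, r1 = len, r2 = prot, r3 = flags, r4 = fd
 *	mov	r5, r6, lsr #12		@ r6 = byte offset -> 4K units
 *	mov	r7, #__NR_mmap2
 *	swi	#0
 *
 * With 4K pages the unit already matches PAGE_SIZE, so the offset can
 * be handed straight to sys_mmap_pgoff below.
 */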
sys_mmap2:
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls whose argument registers differ between the old
 * and new ABIs: EABI aligns 64-bit arguments to even/odd register
 * pairs, so these wrappers reshuffle the OABI layout to match.
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif