/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_EXTABLE_H
#define __ASM_EXTABLE_H

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE

/*
 * Forward declaration so this header is self-contained; previously it
 * relied on every includer having declared struct pt_regs already.
 */
struct pt_regs;

#ifdef CONFIG_BPF_JIT
/*
 * Fixup hook provided by the BPF JIT. NOTE(review): based on the stub
 * below, a return of 0 appears to mean "fault not handled here" —
 * confirm against the JIT implementation.
 */
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs);
#else /* !CONFIG_BPF_JIT */
static inline
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs)
{
	/* No BPF JIT built in: never claims the fault. */
	return 0;
}
#endif /* !CONFIG_BPF_JIT */

/* Generic extable fixup entry point, defined in the arch fault code. */
extern int fixup_exception(struct pt_regs *regs);
#endif