/* xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/entry-common.h (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_ENTRY_COMMON_H
#define _ASM_X86_ENTRY_COMMON_H

#include <linux/user-return-notifier.h>

#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/fpu/api.h>

/* Check that the stack and regs on entry from user mode are sane. */
static __always_inline void arch_check_user_regs(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
		/*
		 * Make sure that the entry code gave us a sensible EFLAGS
		 * register.  Native because we want to check the actual CPU
		 * state, not the interrupt state as imagined by Xen.
		 */
		unsigned long flags = native_save_fl();
		unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;

		/*
		 * For !SMAP hardware we patch out CLAC on entry.
		 */
		if (boot_cpu_has(X86_FEATURE_SMAP) ||
		    (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
			mask |= X86_EFLAGS_AC;

		WARN_ON_ONCE(flags & mask);

		/* We think we came from user mode. Make sure pt_regs agrees. */
		WARN_ON_ONCE(!user_mode(regs));

		/*
		 * All entries from user mode (except #DF) should be on the
		 * normal thread stack and should have user pt_regs in the
		 * correct location.
		 */
		WARN_ON_ONCE(!on_thread_stack());
		WARN_ON_ONCE(regs != task_pt_regs(current));
	}
}
#define arch_check_user_regs arch_check_user_regs
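
/*
 * For context: the generic entry code (kernel/entry/common.c) runs this
 * hook first thing on every entry from user mode, before any
 * instrumentable code executes.  A rough sketch of the caller, assuming
 * the generic enter_from_user_mode() of this kernel generation (not
 * verbatim from this tree):
 *
 *	static __always_inline void enter_from_user_mode(struct pt_regs *regs)
 *	{
 *		arch_check_user_regs(regs);
 *		lockdep_hardirqs_off(CALLER_ADDR0);
 *		user_enter_irqoff();
 *	}
 */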

#define ARCH_SYSCALL_EXIT_WORK		(_TIF_SINGLESTEP)
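
/*
 * The generic syscall exit path ORs this arch-specific mask into its
 * exit-work check, roughly as below (a sketch of the generic
 * SYSCALL_EXIT_WORK definition; the exact set of _TIF_* flags is an
 * assumption):
 *
 *	#define SYSCALL_EXIT_WORK					\
 *		(_TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_TRACE |		\
 *		 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_EMU |		\
 *		 ARCH_SYSCALL_EXIT_WORK)
 */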

static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
	if (ti_work & _TIF_USER_RETURN_NOTIFY)
		fire_user_return_notifiers();

	if (unlikely(ti_work & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	fpregs_assert_state_consistent();
	if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
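
/*
 * Sketch of the generic call site (kernel/entry/common.c, not verbatim):
 * the hook runs with interrupts disabled, after the TIF work loop has
 * settled, so ti_work holds the final flags for this return.
 *
 *	static void exit_to_user_mode_prepare(struct pt_regs *regs)
 *	{
 *		unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
 *
 *		lockdep_assert_irqs_disabled();
 *
 *		if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
 *			ti_work = exit_to_user_mode_loop(regs, ti_work);
 *
 *		arch_exit_to_user_mode_prepare(regs, ti_work);
 *	}
 */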

static __always_inline void arch_exit_to_user_mode(void)
{
	mds_user_clear_cpu_buffers();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode
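
/*
 * mds_user_clear_cpu_buffers() lives in <asm/nospec-branch.h> and flushes
 * CPU-internal buffers on affected hardware (MDS mitigation) just before
 * the return to user space.  A minimal sketch of what it does, assuming
 * the mds_user_clear static key from that header:
 *
 *	static __always_inline void mds_user_clear_cpu_buffers(void)
 *	{
 *		if (static_branch_likely(&mds_user_clear))
 *			mds_clear_cpu_buffers();
 *	}
 */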

#endif