/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: common low-level thread information accessors
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds
 */

#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
#include <linux/errno.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
 * including <asm/current.h> can cause a circular dependency on some platforms.
 *
 * With thread_info embedded as the first member of task_struct,
 * current_thread_info() is simply a cast of current.
 */
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif

#include <linux/bitops.h>

/*
 * For per-arch arch_within_stack_frames() implementations, defined in
 * asm/thread_info.h.
 */
enum {
	BAD_STACK = -1,		/* reject: object overlaps a stack/frame boundary */
	NOT_STACK = 0,		/* object is not on this task's stack at all */
	GOOD_FRAME,		/* object is entirely within a single valid frame */
	GOOD_STACK,		/* object is on the stack, but frames can't be checked */
};

#include <asm/thread_info.h>

#ifdef __KERNEL__

#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}
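
/*
 * Illustrative use only (not part of this header, and my_restart() is a
 * made-up name): a syscall that must be restarted with saved state
 * typically stashes that state in current->restart_block and then
 * returns the value from set_restart_fn(), roughly:
 *
 *	struct restart_block *restart = &current->restart_block;
 *
 *	... save whatever arguments the restart handler will need ...
 *	return set_restart_fn(restart, my_restart);
 *
 * The -ERESTART_RESTARTBLOCK return value tells the signal/syscall-exit
 * path to re-enter the syscall through restart->fn.
 */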

#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

/* GFP flags used when allocating kernel thread stacks (see kernel/fork.c). */
#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)

/*
 * flag set/clear/test wrappers
 * - pass TIF_xxxx constants to these functions
 */

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
					 bool value)
{
	if (value)
		set_ti_thread_flag(ti, flag);
	else
		clear_ti_thread_flag(ti, flag);
}

static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}

#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)

#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
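
/*
 * Illustrative use only (not part of this header): callers pass the
 * TIF_xxxx constants defined by the architecture in <asm/thread_info.h>,
 * e.g.
 *
 *	if (test_thread_flag(TIF_SIGPENDING))
 *		return -ERESTARTSYS;
 *
 * tif_need_resched() is shorthand for testing TIF_NEED_RESCHED on the
 * current task. The *_ti_* variants above take an explicit thread_info
 * pointer; per-task helpers such as set_tsk_thread_flag() in
 * <linux/sched.h> are built on top of them.
 */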

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif

#ifdef CONFIG_HARDENED_USERCOPY
/* Usercopy object-bounds checking, implemented in mm/usercopy.c. */
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */

extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

static inline void copy_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __compiletime_object_size(addr);
	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}
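
/*
 * Illustrative use only (not part of this header): the uaccess copy
 * wrappers gate the actual copy on this check, along the lines of
 *
 *	static __always_inline unsigned long __must_check
 *	copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		if (likely(check_copy_size(to, n, false)))
 *			n = _copy_from_user(to, from, n);
 *		return n;
 *	}
 *
 * so that an undersized destination is caught at compile time when
 * possible, and by the hardened-usercopy runtime check otherwise.
 * (Sketch based on <linux/uaccess.h>; the exact wrapper may differ.)
 */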

#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */