/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
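
/*
 * A minimal sketch of use case (1), assuming a hypothetical 'pending' flag
 * shared between process context and an interrupt handler on the same CPU:
 *
 *	// process context: hand work off to the handler
 *	WRITE_ONCE(pending, 1);
 *
 *	// interrupt handler: observe and clear the flag
 *	if (READ_ONCE(pending)) {
 *		WRITE_ONCE(pending, 0);
 *		// ... process the work ...
 *	}
 *
 * No memory barriers are needed here because both sides run on the same
 * CPU; the _ONCE() macros only stop the compiler from caching, tearing or
 * fusing the flag accesses.
 */
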
#ifndef __ASM_GENERIC_RWONCE_H
#define __ASM_GENERIC_RWONCE_H

#ifndef __ASSEMBLY__

#include <linux/compiler_types.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")
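
/*
 * A sketch of what the assertion above accepts and rejects (the struct
 * types are hypothetical):
 *
 *	int i;				// OK: native word size
 *	u64 j;				// OK: sizeof(long long), even on 32-bit
 *	struct { char c[4]; } s4;	// OK: same size as int
 *	struct { char c[3]; } s3;	// READ_ONCE(s3) fails to build
 */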

/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity. Note that this may result in tears!
 */
#ifndef __READ_ONCE
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
#endif
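
/*
 * For example (a sketch, assuming a 32-bit CPU and a hypothetical u64
 * 'cycles' counter updated concurrently): __READ_ONCE(cycles) performs no
 * size check, so the load may be split into two 32-bit halves and observe
 * the low half of a new value together with the high half of an old one.
 */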

#define READ_ONCE(x)							\
({									\
	compiletime_assert_rwonce_type(x);				\
	__READ_ONCE(x);							\
})

#define __WRITE_ONCE(x, val)						\
do {									\
	*(volatile typeof(x) *)&(x) = (val);				\
} while (0)

#define WRITE_ONCE(x, val)						\
do {									\
	compiletime_assert_rwonce_type(x);				\
	__WRITE_ONCE(x, val);						\
} while (0)
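
/*
 * A common pattern (a sketch; 'done' is a hypothetical int flag set by
 * another CPU or by an interrupt handler):
 *
 *	while (!done)			// broken: the compiler may load
 *		cpu_relax();		// 'done' once and spin forever
 *
 *	while (!READ_ONCE(done))	// correct: forces a fresh load on
 *		cpu_relax();		// every iteration
 */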

static __no_sanitize_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
	return __READ_ONCE(*(unsigned long *)addr);
}

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
 * word from memory atomically but without telling KASAN/KCSAN. This is
 * usually used by unwinding code when walking the stack of a running process.
 */
#define READ_ONCE_NOCHECK(x)						\
({									\
	compiletime_assert(sizeof(x) == sizeof(unsigned long),		\
		"Unsupported access size for READ_ONCE_NOCHECK().");	\
	(typeof(x))__read_once_word_nocheck(&(x));			\
})
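
/*
 * Example (a sketch; 'fp' is a hypothetical frame pointer into another
 * task's stack, an access that KASAN would otherwise report):
 *
 *	unsigned long ret_addr = READ_ONCE_NOCHECK(((unsigned long *)fp)[1]);
 */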

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}
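
/*
 * Only the first byte is checked above because legitimate callers, such as
 * the word-at-a-time name hashing in fs/dcache.c, deliberately read past
 * the end of a string up to the next word boundary. A sketch using the
 * helpers from <linux/word-at-a-time.h> ('name' and 'bits' are
 * hypothetical):
 *
 *	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *	unsigned long c, bits;
 *
 *	do {
 *		c = read_word_at_a_time(name);
 *		name += sizeof(unsigned long);
 *	} while (!has_zero(c, &bits, &constants));
 */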

#endif /* __ASSEMBLY__ */
#endif	/* __ASM_GENERIC_RWONCE_H */