#ifndef _UAPI_LINUX_MEMBARRIER_H
#define _UAPI_LINUX_MEMBARRIER_H

/*
 * linux/membarrier.h
 *
 * membarrier system call API
 *
 * Copyright (c) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * enum membarrier_cmd - membarrier system call command
 * @MEMBARRIER_CMD_QUERY:   Query the set of supported commands. It returns
 *                          a bitmask of valid commands.
 * @MEMBARRIER_CMD_GLOBAL:  Execute a memory barrier on all running threads.
 *                          Upon return from system call, the caller thread
 *                          is ensured that all running threads have passed
 *                          through a state where all memory accesses to
 *                          user-space addresses match program order between
 *                          entry to and return from the system call
 *                          (non-running threads are de facto in such a
 *                          state). This covers threads from all processes
 *                          running on the system. This command returns 0.
 * @MEMBARRIER_CMD_GLOBAL_EXPEDITED:
 *                          Execute a memory barrier on all running threads
 *                          of all processes which previously registered
 *                          with MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
 *                          Upon return from system call, the caller thread
 *                          is ensured that all running threads have passed
 *                          through a state where all memory accesses to
 *                          user-space addresses match program order between
 *                          entry to and return from the system call
 *                          (non-running threads are de facto in such a
 *                          state). This only covers threads from processes
 *                          which registered with
 *                          MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
 *                          This command returns 0. Given that
 *                          registration is about the intent to receive
 *                          the barriers, it is valid to invoke
 *                          MEMBARRIER_CMD_GLOBAL_EXPEDITED from a
 *                          non-registered process.
 * @MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
 *                          Register the process intent to receive
 *                          MEMBARRIER_CMD_GLOBAL_EXPEDITED memory
 *                          barriers. Always returns 0.
 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED:
 *                          Execute a memory barrier on each running
 *                          thread belonging to the same process as the current
 *                          thread. Upon return from system call, the
 *                          caller thread is ensured that all its running
 *                          thread siblings have passed through a state
 *                          where all memory accesses to user-space
 *                          addresses match program order between entry
 *                          to and return from the system call
 *                          (non-running threads are de facto in such a
 *                          state). This only covers threads from the
 *                          same process as the caller thread. This
 *                          command returns 0 on success. The
 *                          "expedited" commands complete faster than
 *                          the non-expedited ones; they never block,
 *                          but have the downside of causing extra
 *                          overhead. A process needs to register its
 *                          intent to use the private expedited command
 *                          prior to using it; otherwise this command
 *                          returns -EPERM.
 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
 *                          Register the process intent to use
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
 *                          returns 0.
 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
 *                          In addition to providing the memory
 *                          ordering guarantees described in
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED, ensure
 *                          that, upon return from system call, all the
 *                          caller's running thread siblings have
 *                          executed a core serializing instruction
 *                          (architectures are required to guarantee
 *                          that non-running threads issue core
 *                          serializing instructions before they resume
 *                          user-space execution). This only covers
 *                          threads from the same process as the caller
 *                          thread. This command returns 0 on success.
 *                          The "expedited" commands complete faster
 *                          than the non-expedited ones; they never
 *                          block, but have the downside of causing
 *                          extra overhead. If this command is not
 *                          implemented by an architecture, -EINVAL is
 *                          returned. A process needs to register its
 *                          intent to use the private expedited sync
 *                          core command prior to using it; otherwise
 *                          this command returns -EPERM.
 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
 *                          Register the process intent to use
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE.
 *                          If this command is not implemented by an
 *                          architecture, -EINVAL is returned.
 *                          Returns 0 on success.
 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
 *                          Ensure that, upon return from system call,
 *                          any rseq critical section currently running
 *                          on the caller's thread siblings has been
 *                          restarted. If the @flags parameter is 0,
 *                          the operation covers all CPUs; if @flags is
 *                          MEMBARRIER_CMD_FLAG_CPU, the operation is
 *                          performed only on the CPU indicated by
 *                          @cpu_id. If this command is not implemented
 *                          by an architecture, -EINVAL is returned. A
 *                          process needs to register its intent to use
 *                          the private expedited rseq command prior to
 *                          using it; otherwise this command returns
 *                          -EPERM.
 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
 *                          Register the process intent to use
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.
 *                          If this command is not implemented by an
 *                          architecture, -EINVAL is returned.
 *                          Returns 0 on success.
 * @MEMBARRIER_CMD_SHARED:
 *                          Alias to MEMBARRIER_CMD_GLOBAL. Provided for
 *                          header backward compatibility.
 *
 * Command to be passed to the membarrier system call. The commands need to
 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
 * the value 0.
 */
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY					= 0,
	MEMBARRIER_CMD_GLOBAL					= (1 << 0),
	MEMBARRIER_CMD_GLOBAL_EXPEDITED				= (1 << 1),
	MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED		= (1 << 2),
	MEMBARRIER_CMD_PRIVATE_EXPEDITED			= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED		= (1 << 4),
	MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE		= (1 << 5),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE	= (1 << 6),
	MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ			= (1 << 7),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ		= (1 << 8),

	/* Alias for header backward compatibility. */
	MEMBARRIER_CMD_SHARED			= MEMBARRIER_CMD_GLOBAL,
};
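
/*
 * Example usage (an illustrative sketch, not part of the UAPI, and therefore
 * kept out of compilation). It assumes the call is made through the raw
 * syscall(2) wrapper; the helper name membarrier_cmd() and the function
 * setup_and_use_private_expedited() are hypothetical. A libc wrapper may be
 * used instead where one is available.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <errno.h>
#include <linux/membarrier.h>

static int membarrier_cmd(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

static int setup_and_use_private_expedited(void)
{
	int supported;

	/* 1. Query the bitmask of commands supported by this kernel. */
	supported = membarrier_cmd(MEMBARRIER_CMD_QUERY, 0, 0);
	if (supported < 0)
		return -errno;
	if (!(supported & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
		return -ENOSYS;

	/*
	 * 2. Register the process intent; without this, the expedited
	 *    command below fails with -EPERM.
	 */
	if (membarrier_cmd(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		return -errno;

	/*
	 * 3. Issue the barrier: on return, all running thread siblings have
	 *    passed through a state where their user-space memory accesses
	 *    match program order.
	 */
	if (membarrier_cmd(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
		return -errno;

	return 0;
}
#endif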

enum membarrier_cmd_flag {
	MEMBARRIER_CMD_FLAG_CPU		= (1 << 0),
};
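
/*
 * Illustrative sketch of the @flags and @cpu_id parameters with
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ (not part of the UAPI, kept out of
 * compilation; the helper name rseq_fence_cpu() is hypothetical). With
 * MEMBARRIER_CMD_FLAG_CPU, only the CPU named by @cpu_id is targeted, which
 * bounds the cost compared to fencing every CPU the process runs on.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <errno.h>
#include <linux/membarrier.h>

/*
 * Once per process, before the first fence (returns -1/EINVAL if the
 * architecture does not implement the rseq commands):
 *
 *	syscall(__NR_membarrier,
 *		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0);
 */

static int rseq_fence_cpu(int cpu_id)
{
	/* Restart any rseq critical section currently running on @cpu_id. */
	if (syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
		    MEMBARRIER_CMD_FLAG_CPU, cpu_id))
		return -errno;
	return 0;
}
#endif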

#endif /* _UAPI_LINUX_MEMBARRIER_H */