/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
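
/*
 * A minimal usage sketch (the my_ctx structure is hypothetical, for
 * illustration only): pin the mm_struct itself so it outlives the
 * owning task, and drop the reference when the user is torn down.
 *
 *	static void my_ctx_init(struct my_ctx *ctx, struct mm_struct *mm)
 *	{
 *		ctx->mm = mm;
 *		mmgrab(mm);	(mm_count reference: the struct stays valid)
 *	}
 *
 *	static void my_ctx_destroy(struct my_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);	(may free @mm if this was the last ref)
 *	}
 */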

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
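
/*
 * A minimal sketch of the mmget_not_zero()/mmput() pattern: when only
 * a bare mm_struct reference is held (e.g. via mmgrab()), the address
 * space must be pinned before it is touched, and the attempt can fail
 * if all users are already gone.
 *
 *	if (mmget_not_zero(mm)) {
 *		(safe to access the address space here)
 *		mmput(mm);
 *	}
 */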

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from async context. Can
 * also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
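
/*
 * Sketch of the usual get_task_mm()/mmput() pairing for a short-lived
 * look at another task's address space:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		(use the address space)
 *		mmput(mm);
 *	}
 */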

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so make sure it always takes precedence
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
	}
	return flags;
}
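
/*
 * Worked example: with PF_MEMALLOC_NOIO set,
 * current_gfp_context(GFP_KERNEL) masks off __GFP_IO and __GFP_FS, so
 * GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) is reduced to
 * GFP_NOIO (__GFP_RECLAIM). With only PF_MEMALLOC_NOFS set, the same
 * call returns GFP_NOFS (__GFP_RECLAIM | __GFP_IO).
 */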

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save
 * function. Always make sure that the given flags value is the return
 * value from the pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
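
/*
 * A minimal usage sketch: bracket a section that must not recurse into
 * the IO path and restore the saved flags once it is done.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	obj = kmalloc(size, GFP_KERNEL);	(implicitly GFP_NOIO here)
 *	...
 *	memalloc_noio_restore(noio_flags);
 */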

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save
 * function. Always make sure that the given flags value is the return
 * value from the pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
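
/*
 * Used the same way as the NOIO pair above, e.g. around a filesystem
 * transaction where allocations must not recurse back into the FS:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	...	(allocations here are implicitly GFP_NOFS)
 *	memalloc_nofs_restore(nofs_flags);
 */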

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
	return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope.
 * All the __GFP_ACCOUNT allocations until the end of the scope will be
 * charged to the given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (in_interrupt()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
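
/*
 * Usage sketch following the NOTE above: save the previous memcg and
 * restore it when the charging scope ends.
 *
 *	struct mem_cgroup *old_memcg = set_active_memcg(memcg);
 *
 *	...	(__GFP_ACCOUNT allocations are charged to memcg)
 *	set_active_memcg(old_memcg);
 */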

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */