xref: /OK3568_Linux_fs/kernel/arch/arm64/include/asm/spinlock.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/qrwlock.h>
#include <asm/qspinlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()
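/*
 * Explanatory note (not in the original header): the generic locking code
 * documented in include/linux/spinlock.h expects smp_mb__after_spinlock()
 * to upgrade the preceding spin_lock() to full ordering. arm64's lock
 * acquisition only provides acquire semantics, hence the full smp_mb().
 */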

/*
 * Changing this will break osq_lock() thanks to the call inside
 * smp_cond_load_relaxed().
 *
 * See:
 * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
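/*
 * For context, the osq_lock() call site referred to above looks roughly
 * like this (simplified sketch based on kernel/locking/osq_lock.c, not
 * part of this header):
 *
 *	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
 *				  vcpu_is_preempted(node_cpu(node->prev))))
 *		return true;
 *
 * smp_cond_load_relaxed() re-evaluates that condition from inside the
 * architecture's wait loop, which may sleep waiting for an event rather
 * than poll, so a vcpu_is_preempted() whose value could change at runtime
 * would not be observed promptly. That is why this definition is kept as
 * a trivial, constant false; see the lore link above for the discussion.
 */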

#endif /* __ASM_SPINLOCK_H */