C Z6.0+pooncelock+poonceLock+pombonce

(*
 * Result: Never
 *
 * This litmus test demonstrates how smp_mb__after_spinlock() may be
 * used to ensure that accesses in different critical sections for a
 * given lock running on different CPUs are nevertheless seen in order
 * by CPUs not holding that lock.
 *)

{}

P0(int *x, int *y, spinlock_t *mylock)
{
	spin_lock(mylock);
	WRITE_ONCE(*x, 1);
	WRITE_ONCE(*y, 1);
	spin_unlock(mylock);
}

P1(int *y, int *z, spinlock_t *mylock)
{
	int r0;

	spin_lock(mylock);
	smp_mb__after_spinlock();
	r0 = READ_ONCE(*y);
	WRITE_ONCE(*z, 1);
	spin_unlock(mylock);
}

P2(int *x, int *z)
{
	int r1;

	WRITE_ONCE(*z, 2);
	smp_mb();
	r1 = READ_ONCE(*x);
}

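(*
 * Reading aid (not part of the original header): the "exists" clause
 * below asks whether P1 can observe P0's critical section as earlier
 * (1:r0=1), P2's store to z can be the final one (z=2), and yet P2 can
 * miss P0's store to x (2:r1=0).  With the smp_mb__after_spinlock() in
 * P1, this outcome is forbidden: roughly speaking, the barrier makes
 * the ordering of the two critical sections visible to P2 as well, so
 * P2's smp_mb() then guarantees that its read of x sees P0's store.
 *)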
exists (1:r0=1 /\ z=2 /\ 2:r1=0)