C MP+polocks

(*
 * Result: Never
 *
 * This litmus test demonstrates how lock acquisitions and releases can
 * stand in for smp_load_acquire() and smp_store_release(), respectively.
 * In other words, when holding a given lock (or indeed after releasing a
 * given lock), a CPU is not only guaranteed to see the accesses that other
 * CPUs made while previously holding that lock, it is also guaranteed
 * to see all prior accesses by those other CPUs.
 *)

{}

P0(int *x, int *y, spinlock_t *mylock)
{
	WRITE_ONCE(*x, 1);
	spin_lock(mylock);
	WRITE_ONCE(*y, 1);
	spin_unlock(mylock);
}

P1(int *x, int *y, spinlock_t *mylock)
{
	int r0;
	int r1;

	spin_lock(mylock);
	r0 = READ_ONCE(*y);
	spin_unlock(mylock);
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)
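
(*
 * For comparison, a minimal sketch of the same message-passing pattern
 * written directly with smp_store_release() and smp_load_acquire(),
 * which the lock-based version above emulates.  This mirrors the
 * in-tree MP+pooncerelease+poacquireonce.litmus test, and its exists
 * clause is likewise expected to be forbidden ("Never"):
 *
 * P0(int *x, int *y)
 * {
 *	WRITE_ONCE(*x, 1);
 *	smp_store_release(y, 1);
 * }
 *
 * P1(int *x, int *y)
 * {
 *	int r0;
 *	int r1;
 *
 *	r0 = smp_load_acquire(y);
 *	r1 = READ_ONCE(*x);
 * }
 *
 * exists (1:r0=1 /\ 1:r1=0)
 *)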