C MP+porevlocks

(*
 * Result: Never
 *
 * This litmus test demonstrates how lock acquisitions and releases can
 * stand in for smp_load_acquire() and smp_store_release(), respectively.
 * In other words, when holding a given lock (or indeed after releasing a
 * given lock), a CPU is not only guaranteed to see the accesses that other
 * CPUs made while previously holding that lock, it is also guaranteed to
 * see all prior accesses by those other CPUs.
 *)

{}

P0(int *x, int *y, spinlock_t *mylock)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*y);
	spin_lock(mylock);
	r1 = READ_ONCE(*x);
	spin_unlock(mylock);
}

P1(int *x, int *y, spinlock_t *mylock)
{
	spin_lock(mylock);
	WRITE_ONCE(*x, 1);
	spin_unlock(mylock);
	WRITE_ONCE(*y, 1);
}

exists (0:r0=1 /\ 0:r1=0)