c438b7d860
The memory model has been updated to provide a stronger ordering guarantee for unlock(A)+lock(B) on the same CPU/thread. Therefore, add two litmus tests describing this new guarantee. These tests are simple, yet they clearly show the usage of the new guarantee, and they can also serve as self-tests for the modification to the model. Co-developed-by: Alan Stern <stern@rowland.harvard.edu> Signed-off-by: Alan Stern <stern@rowland.harvard.edu> Signed-off-by: Boqun Feng <boqun.feng@gmail.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
34 lines
564 B
Plaintext
C MP+unlocklockonceonce+fencermbonceonce

(*
 * Result: Never
 *
 * If two locked critical sections execute on the same CPU, stores in the
 * first must propagate to each CPU before stores in the second do, even if
 * the critical sections are protected by different locks.
 *)

{}

P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	spin_lock(s);
	WRITE_ONCE(*x, 1);
	spin_unlock(s);
	spin_lock(t);
	WRITE_ONCE(*y, 1);
	spin_unlock(t);
}

P1(int *x, int *y)
{
	int r1;
	int r2;

	r1 = READ_ONCE(*y);
	smp_rmb();
	r2 = READ_ONCE(*x);
}

exists (1:r1=1 /\ 1:r2=0)