locking/pvqspinlock: Use try_cmpxchg() in qspinlock_paravirt.h
Use try_cmpxchg(*ptr, &old, new) instead of cmpxchg(*ptr, old, new) == old in qspinlock_paravirt.h.

The x86 CMPXCHG instruction returns success in the ZF flag, so this change saves a compare after the cmpxchg.

No functional change intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20240411192317.25432-2-ubizjak@gmail.com
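For illustration only, a minimal userspace sketch of the two patterns this patch converts between. It uses the C11 <stdatomic.h> compare-exchange, whose semantics mirror the kernel's cmpxchg()/try_cmpxchg() helpers (the kernel macros themselves are not usable outside the kernel); the helper names cmpxchg_byte/trylock_* are made up for the example. The old style compares the returned value, while the new style uses the boolean result of the exchange, which on x86 lets the compiler branch on ZF straight out of CMPXCHG instead of emitting a separate compare.

#include <stdatomic.h>
#include <stdbool.h>

/* Old pattern: a cmpxchg()-style helper that returns the previous value,
 * so the caller needs an explicit comparison afterwards. */
static unsigned char cmpxchg_byte(_Atomic unsigned char *ptr,
				  unsigned char old, unsigned char new)
{
	atomic_compare_exchange_strong(ptr, &old, new);
	return old;		/* previous value of *ptr */
}

static bool trylock_old(_Atomic unsigned char *locked)
{
	return cmpxchg_byte(locked, 0, 1) == 0;	/* extra compare of the result */
}

/* New pattern: use the boolean success result directly, as
 * try_cmpxchg_acquire() does in the patched code. */
static bool trylock_new(_Atomic unsigned char *locked)
{
	unsigned char old = 0;

	return atomic_compare_exchange_strong(locked, &old, 1);
}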
parent 6a97734f22
commit fea0e1820b
@@ -86,9 +86,10 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 	 */
 	for (;;) {
 		int val = atomic_read(&lock->val);
+		u8 old = 0;
 
 		if (!(val & _Q_LOCKED_PENDING_MASK) &&
-		    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
+		    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
 			lockevent_inc(pv_lock_stealing);
 			return true;
 		}
@@ -211,8 +212,9 @@ static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
 	int hopcnt = 0;
 
 	for_each_hash_entry(he, offset, hash) {
+		struct qspinlock *old = NULL;
 		hopcnt++;
-		if (!cmpxchg(&he->lock, NULL, lock)) {
+		if (try_cmpxchg(&he->lock, &old, lock)) {
 			WRITE_ONCE(he->node, node);
 			lockevent_pv_hop(hopcnt);
 			return &he->lock;
@@ -355,7 +357,7 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
-
+	enum vcpu_state old = vcpu_halted;
 	/*
 	 * If the vCPU is indeed halted, advance its state to match that of
 	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
@@ -372,8 +374,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 	 * subsequent writes.
 	 */
 	smp_mb__before_atomic();
-	if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
-	    != vcpu_halted)
+	if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
 		return;
 
 	/*
@@ -541,15 +542,14 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 #ifndef __pv_queued_spin_unlock
 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-	u8 locked;
+	u8 locked = _Q_LOCKED_VAL;
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
-	if (likely(locked == _Q_LOCKED_VAL))
+	if (try_cmpxchg_release(&lock->locked, &locked, 0))
 		return;
 
 	__pv_queued_spin_unlock_slowpath(lock, locked);