[RFC][PATCH 2/3] locking/qrwlock: Use smp_cond_load_acquire()

From: Peter Zijlstra
Date: Mon Apr 04 2016 - 08:38:01 EST


Use smp_cond_load_acquire() to make better use of the hardware-assisted
'spin' wait on arm64.

Arguably the second hunk is the most horrid abuse possible, but it
avoids having to use cmpwait (see the next patch) directly. Also, this
makes 'clever' (ab)use of the cond+rmb acquire (the smp_rmb() behind
smp_acquire__after_ctrl_dep()) to omit the acquire from cmpxchg().
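
For reference, the generic fallback for smp_cond_load_acquire() (added
in patch 1/3 of this series) is roughly the sketch below; see that
patch for the authoritative version. The acquire comes from the
smp_rmb() behind smp_acquire__after_ctrl_dep(), issued after the
control dependency on the loop exit:

#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	typeof(*ptr) VAL;					\
	for (;;) {						\
		/* relaxed load; VAL is visible to cond_expr */	\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	/* ctrl dependency + rmb upgrades to load-acquire */	\
	smp_acquire__after_ctrl_dep();				\
	VAL;							\
})

On arm64 the READ_ONCE()/cpu_relax() wait can instead be backed by an
LDXR+WFE based cmpwait (next patch), letting the CPU sleep until the
cacheline is written.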

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/locking/qrwlock.c | 18 ++++--------------
1 file changed, 4 insertions(+), 14 deletions(-)

--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -53,10 +53,7 @@ struct __qrwlock {
 static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
-	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		cpu_relax_lowlatency();
-		cnts = atomic_read_acquire(&lock->cnts);
-	}
+	smp_cond_load_acquire(&lock->cnts.counter, (VAL & _QW_WMASK) != _QW_LOCKED);
 }

/**
@@ -109,8 +106,6 @@ EXPORT_SYMBOL(queued_read_lock_slowpath)
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
-	u32 cnts;
-
 	/* Put the writer into the wait queue */
 	arch_spin_lock(&lock->wait_lock);
 
@@ -134,15 +129,10 @@ void queued_write_lock_slowpath(struct q
 	}
 
 	/* When no more readers, set the locked flag */
-	for (;;) {
-		cnts = atomic_read(&lock->cnts);
-		if ((cnts == _QW_WAITING) &&
-		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
-					    _QW_LOCKED) == _QW_WAITING))
-			break;
+	smp_cond_load_acquire(&lock->cnts.counter,
+			      (VAL == _QW_WAITING) &&
+			      atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING, _QW_LOCKED) == _QW_WAITING);
 
-		cpu_relax_lowlatency();
-	}
 unlock:
 	arch_spin_unlock(&lock->wait_lock);
 }
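
With the generic fallback above, the second hunk effectively expands to
the following (hand-expanded here purely for illustration). The &&
short-circuits, so the relaxed cmpxchg() is only attempted once the
reader count has drained to _QW_WAITING, and the barrier after the loop
upgrades the successful relaxed cmpxchg() to an acquire:

	int VAL;	/* typeof(lock->cnts.counter) */

	for (;;) {
		VAL = READ_ONCE(lock->cnts.counter);
		if (VAL == _QW_WAITING &&
		    atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					   _QW_LOCKED) == _QW_WAITING)
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();	/* acquire for the lock */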