[PATCH -v4 7/8] locking/mutex: Simplify some ww_mutex code in __mutex_lock_common()

From: Peter Zijlstra
Date: Fri Oct 07 2016 - 11:07:11 EST


From: Waiman Long <Waiman.Long@xxxxxxx>

This patch removes some of the duplicated ww_mutex code in
__mutex_lock_common(). The ww_mutex pointer is now computed once, in
the initial use_ww_ctx check at the top of the function, and reused
when setting the fast-path and slow-path contexts, instead of being
recomputed with container_of() in each branch.
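
For reference, container_of() just converts a pointer to an embedded
member back into a pointer to the enclosing structure, so computing the
ww_mutex pointer once and reusing it is equivalent to redoing the
conversion at every use site. A minimal userspace sketch of that
pattern (local container_of definition and simplified stand-in types,
not the actual kernel structures):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_lock { int owner; };			/* stand-in for struct mutex    */
struct ww_lock { struct base_lock base; int ctx; };	/* stand-in for struct ww_mutex */

int main(void)
{
	struct ww_lock w = { .base = { .owner = 0 }, .ctx = 42 };
	struct base_lock *lock = &w.base;	/* what the lock function is handed */

	/* Convert back to the enclosing ww_lock once... */
	struct ww_lock *ww = container_of(lock, struct ww_lock, base);

	/* ...and reuse the result everywhere, instead of repeating the
	 * container_of() in each branch that needs it. */
	printf("ctx via single conversion: %d\n", ww->ctx);
	return 0;
}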

Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Imre Deak <imre.deak@xxxxxxxxx>
Cc: Jason Low <jason.low2@xxxxxxx>
Cc: "Paul E. McKenney" <paulmck@xxxxxxxxxx>
Cc: Ding Tianhong <dingtianhong@xxxxxxxxxx>
Cc: Davidlohr Bueso <dave@xxxxxxxxxxxx>
Cc: Will Deacon <Will.Deacon@xxxxxxx>
Signed-off-by: Waiman Long <Waiman.Long@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/locking/mutex.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -580,10 +580,11 @@ __mutex_lock_common(struct mutex *lock,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 	bool first = false;
+	struct ww_mutex *ww;
 	int ret;
 
 	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+		ww = container_of(lock, struct ww_mutex, base);
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 	}
@@ -595,12 +596,8 @@ __mutex_lock_common(struct mutex *lock,
 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx) {
-			struct ww_mutex *ww;
-			ww = container_of(lock, struct ww_mutex, base);
-
+		if (use_ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
-		}
 		preempt_enable();
 		return 0;
 	}
@@ -680,10 +677,8 @@ __mutex_lock_common(struct mutex *lock,
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+	if (use_ww_ctx)
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
-	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	preempt_enable();