author     Peter Zijlstra <peterz@infradead.org>    2017-03-23 15:56:10 +0100
committer  Thomas Gleixner <tglx@linutronix.de>     2017-04-04 11:44:05 +0200
commit     aa2bfe55366552cb7e93e8709d66e698d79ccc47 (patch)
tree       c88ac6fbcf4a8ef48d40a1d9e40ff40ffeab0758 /kernel/locking
parent     85e2d4f992868ad78dc8bb2c077b652fcfb3661a (diff)
rtmutex: Clean up
Previous patches changed the meaning of the return value of
rt_mutex_slowunlock(); update comments and code to reflect this.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: juri.lelli@arm.com
Cc: bigeasy@linutronix.de
Cc: xlpang@redhat.com
Cc: rostedt@goodmis.org
Cc: mathieu.desnoyers@efficios.com
Cc: jdesfossez@efficios.com
Cc: bristot@redhat.com
Link: http://lkml.kernel.org/r/20170323150216.255058238@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
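
Condensed from the diff below, the contract this clean-up settles on: the slow path returns true only when the caller must run rt_mutex_postunlock(), which performs the deferred wakeup and re-enables preemption. An illustrative sketch of the resulting caller pattern (drawn from rt_mutex_futex_unlock() in the patch; the fast path and surrounding context are omitted):

    DEFINE_WAKE_Q(wake_q);
    bool postunlock;

    raw_spin_lock_irq(&lock->wait_lock);
    postunlock = __rt_mutex_futex_unlock(lock, &wake_q);  /* may preempt_disable() */
    raw_spin_unlock_irq(&lock->wait_lock);

    if (postunlock)
        rt_mutex_postunlock(&wake_q);  /* wake_up_q() + preempt_enable() */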
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/rtmutex.c        | 28
-rw-r--r--  kernel/locking/rtmutex_common.h |  2
2 files changed, 14 insertions, 16 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8faf472c430f..4b1015ef0dc7 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1330,7 +1330,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 
 /*
  * Slow path to release a rt-mutex.
- * Return whether the current task needs to undo a potential priority boosting.
+ *
+ * Return whether the current task needs to call rt_mutex_postunlock().
  */
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 					struct wake_q_head *wake_q)
@@ -1401,8 +1402,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	/* check PI boosting */
-	return true;
+	return true; /* call rt_mutex_postunlock() */
 }
 
 /*
@@ -1449,15 +1449,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 }
 
 /*
- * Undo pi boosting (if necessary) and wake top waiter.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
  */
-void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
+void rt_mutex_postunlock(struct wake_q_head *wake_q)
 {
 	wake_up_q(wake_q);
 
 	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
-	if (deboost)
-		preempt_enable();
+	preempt_enable();
 }
 
 static inline void
@@ -1466,14 +1465,12 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 				   struct wake_q_head *wqh))
 {
 	DEFINE_WAKE_Q(wake_q);
-	bool deboost;
 
 	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
 		return;
 
-	deboost = slowfn(lock, &wake_q);
-
-	rt_mutex_postunlock(&wake_q, deboost);
+	if (slowfn(lock, &wake_q))
+		rt_mutex_postunlock(&wake_q);
 }
 
 /**
@@ -1593,19 +1590,20 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 	 */
 	preempt_disable();
 
-	return true; /* deboost and wakeups */
+	return true; /* call postunlock() */
 }
 
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
-	bool deboost;
+	bool postunlock;
 
 	raw_spin_lock_irq(&lock->wait_lock);
-	deboost = __rt_mutex_futex_unlock(lock, &wake_q);
+	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
-	rt_mutex_postunlock(&wake_q, deboost);
+	if (postunlock)
+		rt_mutex_postunlock(&wake_q);
 }
 
 /**
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index a09c02982391..9e36aeddce18 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -122,7 +122,7 @@ extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
 				    struct wake_q_head *wqh);
 
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
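
A note on the design choice the rename captures: the slow unlock paths (rt_mutex_slowunlock() and __rt_mutex_futex_unlock()) call preempt_disable() while wait_lock is still held, and the matching preempt_enable() runs only in rt_mutex_postunlock(), after wake_up_q(). Returning true therefore means "the disable/wakeup pairing is armed and must be completed", not "a priority boost needs undoing" as the old deboost name implied; a caller that ignored a true return would leave the preempt count elevated. Condensed from the hunks above, the pairing looks like:

    preempt_disable();   /* slow unlock path, under wait_lock             */
    ...                  /* wait_lock is dropped before the wakeup runs   */
    wake_up_q(wake_q);   /* rt_mutex_postunlock()                         */
    preempt_enable();    /* pairs with the preempt_disable() above        */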