author     David S. Miller <davem@davemloft.net>   2018-01-19 22:59:33 -0500
committer  David S. Miller <davem@davemloft.net>   2018-01-19 22:59:33 -0500
commit     8565d26bcb2ff6df646e946d2913fcf706d46b66 (patch)
tree       21ffaccc3cbac5e558d51c20cfbecbfec86a02c4 /kernel/locking/rtmutex.c
parent     85831e56a1d0c75a1560e61acbb8591e9f11c6b7 (diff)
parent     ec835f8104a21f4d4eeb9d316ee71d2b4a7f00de (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The BPF verifier conflict was some minor contextual issue.

The TUN conflict was less trivial. Cong Wang fixed a memory leak of tfile->tx_array in 'net'. This is an skb_array. But meanwhile in net-next tun changed tfile->tx_array into tfile->tx_ring, which is a ptr_ring.

Signed-off-by: David S. Miller <davem@davemloft.net>
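For reference, the sketch below contrasts the two queue types named in the conflict description. It is illustrative only: the helper names are hypothetical, and this is not the actual drivers/net/tun.c code or the exact merge resolution.

#include <linux/skb_array.h>
#include <linux/ptr_ring.h>

/* 'net' side of the conflict: the per-file queue is an skb_array, and the
 * leak fix releases it as such. (Hypothetical helper, for illustration.) */
static void example_release_skb_array(struct skb_array *tx_array)
{
	skb_array_cleanup(tx_array);
}

/* net-next side: the field became a ptr_ring, so releasing it goes through
 * ptr_ring_cleanup(), which invokes the destructor on any entries still
 * queued. (Hypothetical helper, for illustration.) */
static void example_release_ptr_ring(struct ptr_ring *tx_ring,
				     void (*destroy)(void *ptr))
{
	ptr_ring_cleanup(tx_ring, destroy);
}

In the merged tree the field is the ptr_ring variant, so the leak fix from 'net' presumably ends up applying to tfile->tx_ring rather than tfile->tx_array.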
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--  kernel/locking/rtmutex.c  |  26  +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 6f3dba6e4e9e..65cc0cb984e6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	return ret;
 }
 
+static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
+{
+	int ret = try_to_take_rt_mutex(lock, current, NULL);
+
+	/*
+	 * try_to_take_rt_mutex() sets the lock waiters bit
+	 * unconditionally. Clean this up.
+	 */
+	fixup_rt_mutex_waiters(lock);
+
+	return ret;
+}
+
 /*
  * Slow path try-lock function:
  */
@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-	ret = try_to_take_rt_mutex(lock, current, NULL);
-
-	/*
-	 * try_to_take_rt_mutex() sets the lock waiters bit
-	 * unconditionally. Clean this up.
-	 */
-	fixup_rt_mutex_waiters(lock);
+	ret = __rt_mutex_slowtrylock(lock);
 
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
 	return rt_mutex_slowtrylock(lock);
 }
 
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  *			the timeout structure is provided
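The hunks above factor the trylock fast path out of rt_mutex_slowtrylock() into __rt_mutex_slowtrylock() and expose it to the futex code as __rt_mutex_futex_trylock(). rt_mutex_slowtrylock() wraps the same work in raw_spin_lock_irqsave(&lock->wait_lock), so the new entry point is presumably for callers that already hold wait_lock. A minimal, hypothetical sketch of such a caller (not part of this commit; the function name is made up):

static int example_trylock_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);	/* caller already owns wait_lock */

	return __rt_mutex_futex_trylock(lock);	/* does not acquire wait_lock itself */
}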