author     Will Deacon <will.deacon@arm.com>          2017-10-03 19:25:27 +0100
committer  Ingo Molnar <mingo@kernel.org>             2017-10-10 11:50:18 +0200
commit     a8a217c22116eff6c120d753c9934089fb229af0 (patch)
tree       d56f1283ba7153f9ead9fb92c1a1794143c6e4a3 /arch/tile
parent     26c4eb192c6224e5297496cead36404b62fb071b (diff)
locking/core: Remove {read,spin,write}_can_lock()
Outside of the locking code itself, {read,spin,write}_can_lock() have no users in tree. Apparmor (the last remaining user of write_can_lock()) got moved over to lockdep by the previous patch.

This patch removes the use of {read,spin,write}_can_lock() from the BUILD_LOCK_OPS macro, deferring to the trylock operation for testing the lock status, and subsequently removes the unused macros altogether. They aren't guaranteed to work in a concurrent environment and can give incorrect results in the case of qrwlock.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
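Why defer to trylock rather than fix up can_lock: a "would locking succeed?" query is a time-of-check-to-time-of-use race, since the answer can be stale by the time the caller acts on it, and (as the commit message notes) it can return outright incorrect results for qrwlock. The sketch below is a minimal user-space illustration of that difference using C11 atomics; it is not kernel code, and the toy_spinlock / toy_spin_*() names are made up for the example.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_spinlock {
	atomic_int locked;	/* 0 = free, 1 = held */
};

/*
 * Racy "can_lock": even if this returns true, another thread may take
 * the lock before the caller gets around to acquiring it (TOCTOU).
 */
static bool toy_spin_can_lock(struct toy_spinlock *lock)
{
	return atomic_load_explicit(&lock->locked, memory_order_relaxed) == 0;
}

/*
 * Atomic test-and-acquire: on return we either own the lock or we don't,
 * so the result can never go stale. This is the kind of primitive the
 * patch defers to instead of the *_can_lock() helpers.
 */
static bool toy_spin_trylock(struct toy_spinlock *lock)
{
	int expected = 0;

	return atomic_compare_exchange_strong_explicit(&lock->locked,
						       &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void toy_spin_unlock(struct toy_spinlock *lock)
{
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}

A caller that needs to test availability can use the trylock pattern directly, e.g. if (toy_spin_trylock(&l)) { ... toy_spin_unlock(&l); }, which is exactly what the reworked BUILD_LOCK_OPS macro does.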
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/spinlock_32.h  16
-rw-r--r--  arch/tile/include/asm/spinlock_64.h  18
2 files changed, 0 insertions(+), 34 deletions(-)
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index cba8ba9b8da6..91d05f21cba9 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -80,22 +80,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define _RD_COUNT_WIDTH 8
/**
- * arch_read_can_lock() - would read_trylock() succeed?
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
- return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
-}
-
-/**
- * arch_write_can_lock() - would write_trylock() succeed?
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
- return rwlock->lock == 0;
-}
-
-/**
* arch_read_lock() - acquire a read lock.
*/
void arch_read_lock(arch_rwlock_t *rwlock);
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 9a2c2d605752..c802f48badf4 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -93,24 +93,6 @@ static inline int arch_write_val_locked(int val)
return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
}
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- return !arch_write_val_locked(rw->lock);
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- return rw->lock == 0;
-}
-
extern void __read_lock_failed(arch_rwlock_t *rw);
static inline void arch_read_lock(arch_rwlock_t *rw)