author     Matthew Wilcox <matthew@wil.cx>          2008-03-07 21:55:58 -0500
committer  Matthew Wilcox <willy@linux.intel.com>   2008-04-17 10:42:34 -0400
commit     64ac24e738823161693bf791f87adc802cf529ff (patch)
tree       19c0b0cf314d4394ca580c05b86cdf874ce0a167 /include
parent     e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff)
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C implementation is better for maintainability, debuggability and extensibility. Thanks to Peter Zijlstra for fixing the lockdep warning. Thanks to Harvey Harrison for pointing out that the unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
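Every per-architecture header in the hunks below collapses to a one-line include of the new include/linux/semaphore.h (77 lines, per the diffstat); that header and the common slow-path code live outside this 'include'-only view. The following is only a sketch of the shape such a generic spinlock-plus-wait-list implementation takes, under that assumption, and is not the verbatim header added by the commit:

/*
 * Sketch of a generic semaphore (assumed shape, not the actual header):
 * a spinlock protecting a plain count and a list of waiting tasks.
 * The contended paths become ordinary C functions compiled once for
 * every architecture instead of per-arch assembly stubs.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct semaphore {
	spinlock_t		lock;		/* protects count and wait_list */
	unsigned int		count;		/* tokens currently available */
	struct list_head	wait_list;	/* tasks sleeping on this semaphore */
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.lock		= __SPIN_LOCK_UNLOCKED((name).lock),		\
	.count		= n,						\
	.wait_list	= LIST_HEAD_INIT((name).wait_list),		\
}

static inline void sema_init(struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}

/* Slow paths live in shared C code; callers keep the familiar API. */
extern void down(struct semaphore *sem);
extern int  down_interruptible(struct semaphore *sem);	/* -EINTR on signal */
extern int  down_trylock(struct semaphore *sem);	/* 0 if acquired, 1 if not */
extern void up(struct semaphore *sem);

Because the exported entry points keep the same names and semantics, existing callers of down()/up() need no changes; only the per-architecture headers and their hand-written fast paths go away.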
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/semaphore.h  150
-rw-r--r--  include/asm-arm/semaphore-helper.h  84
-rw-r--r--  include/asm-arm/semaphore.h  99
-rw-r--r--  include/asm-avr32/semaphore.h  109
-rw-r--r--  include/asm-blackfin/semaphore-helper.h  82
-rw-r--r--  include/asm-blackfin/semaphore.h  106
-rw-r--r--  include/asm-cris/semaphore-helper.h  78
-rw-r--r--  include/asm-cris/semaphore.h  134
-rw-r--r--  include/asm-frv/semaphore.h  156
-rw-r--r--  include/asm-h8300/semaphore-helper.h  85
-rw-r--r--  include/asm-h8300/semaphore.h  191
-rw-r--r--  include/asm-ia64/semaphore.h  100
-rw-r--r--  include/asm-m32r/semaphore.h  145
-rw-r--r--  include/asm-m68k/semaphore-helper.h  142
-rw-r--r--  include/asm-m68k/semaphore.h  164
-rw-r--r--  include/asm-m68knommu/semaphore-helper.h  82
-rw-r--r--  include/asm-m68knommu/semaphore.h  154
-rw-r--r--  include/asm-mips/semaphore.h  109
-rw-r--r--  include/asm-mn10300/semaphore.h  170
-rw-r--r--  include/asm-parisc/semaphore-helper.h  89
-rw-r--r--  include/asm-parisc/semaphore.h  146
-rw-r--r--  include/asm-powerpc/semaphore.h  95
-rw-r--r--  include/asm-s390/semaphore.h  108
-rw-r--r--  include/asm-sh/semaphore-helper.h  89
-rw-r--r--  include/asm-sh/semaphore.h  116
-rw-r--r--  include/asm-sparc/semaphore.h  193
-rw-r--r--  include/asm-sparc64/semaphore.h  54
-rw-r--r--  include/asm-um/semaphore.h  7
-rw-r--r--  include/asm-v850/semaphore.h  85
-rw-r--r--  include/asm-x86/semaphore.h  6
-rw-r--r--  include/asm-x86/semaphore_32.h  175
-rw-r--r--  include/asm-x86/semaphore_64.h  180
-rw-r--r--  include/asm-xtensa/semaphore.h  100
-rw-r--r--  include/linux/semaphore.h  77
34 files changed, 100 insertions, 3760 deletions
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index f1e9278a9fe2..d9b2034ed1d2 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -1,149 +1 @@
-#ifndef _ALPHA_SEMAPHORE_H
-#define _ALPHA_SEMAPHORE_H
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1996, 2000 Richard Henderson
- */
-
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/compiler.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- /*
- * Logically,
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- * except that gcc produces better initializing by parts yet.
- */
-
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void down(struct semaphore *);
-extern void __down_failed(struct semaphore *);
-extern int down_interruptible(struct semaphore *);
-extern int __down_failed_interruptible(struct semaphore *);
-extern int down_trylock(struct semaphore *);
-extern void up(struct semaphore *);
-extern void __up_wakeup(struct semaphore *);
-
-/*
- * Hidden out of line code is fun, but extremely messy. Rely on newer
- * compilers to do a respectable job with this. The contention cases
- * are handled out of line in arch/alpha/kernel/semaphore.c.
- */
-
-static inline void __down(struct semaphore *sem)
-{
- long count;
- might_sleep();
- count = atomic_dec_return(&sem->count);
- if (unlikely(count < 0))
- __down_failed(sem);
-}
-
-static inline int __down_interruptible(struct semaphore *sem)
-{
- long count;
- might_sleep();
- count = atomic_dec_return(&sem->count);
- if (unlikely(count < 0))
- return __down_failed_interruptible(sem);
- return 0;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- */
-
-static inline int __down_trylock(struct semaphore *sem)
-{
- long ret;
-
- /* "Equivalent" C:
-
- do {
- ret = ldl_l;
- --ret;
- if (ret < 0)
- break;
- ret = stl_c = ret;
- } while (ret == 0);
- */
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,1,%0\n"
- " blt %0,2f\n"
- " stl_c %0,%1\n"
- " beq %0,3f\n"
- " mb\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (ret), "=m" (sem->count)
- : "m" (sem->count));
-
- return ret < 0;
-}
-
-static inline void __up(struct semaphore *sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up_wakeup(sem);
-}
-
-#if !defined(CONFIG_DEBUG_SEMAPHORE)
-extern inline void down(struct semaphore *sem)
-{
- __down(sem);
-}
-extern inline int down_interruptible(struct semaphore *sem)
-{
- return __down_interruptible(sem);
-}
-extern inline int down_trylock(struct semaphore *sem)
-{
- return __down_trylock(sem);
-}
-extern inline void up(struct semaphore *sem)
-{
- __up(sem);
-}
-#endif
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f1987edb9..000000000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ASMARM_SEMAPHORE_HELPER_H
-#define ASMARM_SEMAPHORE_HELPER_H
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (atomic_read(&sem->count) <= 0)
- sem->waking++;
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking non zero interruptible
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_try_lock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking <= 0)
- atomic_inc(&sem->count);
- else {
- sem->waking--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 1c8b441f89e3..d9b2034ed1d2 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -1,98 +1 @@
-/*
- * linux/include/asm-arm/semaphore.h
- */
-#ifndef __ASM_ARM_SEMAPHORE_H
-#define __ASM_ARM_SEMAPHORE_H
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-#include <asm/locks.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INIT(name, cnt) \
-{ \
- .count = ATOMIC_INIT(cnt), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INIT(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed(void);
-asmlinkage int __down_interruptible_failed(void);
-asmlinkage int __down_trylock_failed(void);
-asmlinkage void __up_wakeup(void);
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- __down_op(sem, __down_failed);
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-static inline int down_interruptible (struct semaphore * sem)
-{
- might_sleep();
- return __down_op_ret(sem, __down_interruptible_failed);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
- return __down_op_ret(sem, __down_trylock_failed);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- __up_op(sem, __up_wakeup);
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h
index feaf1d453386..d9b2034ed1d2 100644
--- a/include/asm-avr32/semaphore.h
+++ b/include/asm-avr32/semaphore.h
@@ -1,108 +1 @@
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * Based on include/asm-i386/semaphore.h
- * Copyright (C) 1996 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_SEMAPHORE_H
-#define __ASM_AVR32_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-void __down(struct semaphore * sem);
-int __down_interruptible(struct semaphore * sem);
-void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (unlikely(atomic_dec_return (&sem->count) < 0))
- __down (sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (unlikely(atomic_dec_return (&sem->count) < 0))
- ret = __down_interruptible (sem);
- return ret;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return (&sem->count) <= 0))
- __up (sem);
-}
-
-#endif /*__ASM_AVR32_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h
deleted file mode 100644
index 9082b0dc3eb5..000000000000
--- a/include/asm-blackfin/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Based on M68K version, Lineo Inc. May 2001 */
-
-#ifndef _BFIN_SEMAPHORE_HELPER_H
-#define _BFIN_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- */
-
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore *sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret = 0;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret = 1;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 0;
- } else
- atomic_inc(&sem->count);
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif /* _BFIN_SEMAPHORE_HELPER_H */
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h
index 533f90fb2e4e..d9b2034ed1d2 100644
--- a/include/asm-blackfin/semaphore.h
+++ b/include/asm-blackfin/semaphore.h
@@ -1,105 +1 @@
-#ifndef _BFIN_SEMAPHORE_H
-#define _BFIN_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * BFIN version by akbar hussain Lineo Inc April 2001
- *
- */
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore *sem);
-asmlinkage int __down_interruptible(struct semaphore *sem);
-asmlinkage int __down_trylock(struct semaphore *sem);
-asmlinkage void __up(struct semaphore *sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits.
- */
-static inline void down(struct semaphore *sem)
-{
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- int ret = 0;
-
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_interruptible(sem);
- return (ret);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
- int ret = 0;
-
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_trylock(sem);
- return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
- if (atomic_inc_return(&sem->count) <= 0)
- __up(sem);
-}
-
-#endif /* __ASSEMBLY__ */
-#endif /* _BFIN_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
deleted file mode 100644
index 27bfeca1b981..000000000000
--- a/include/asm-cris/semaphore-helper.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
- *
- * SMP- and interrupt-safe semaphores helper functions. Generic versions, no
- * optimizations whatsoever...
- *
- */
-
-#ifndef _ASM_SEMAPHORE_HELPER_H
-#define _ASM_SEMAPHORE_HELPER_H
-
-#include <asm/atomic.h>
-#include <linux/errno.h>
-
-#define read(a) ((a)->counter)
-#define inc(a) (((a)->counter)++)
-#define dec(a) (((a)->counter)--)
-
-#define count_inc(a) ((*(a))++)
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- local_irq_save(flags);
- if (read(&sem->waking) > 0) {
- dec(&sem->waking);
- ret = 1;
- }
- local_irq_restore(flags);
- return ret;
-}
-
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret = 0;
- unsigned long flags;
-
- local_irq_save(flags);
- if (read(&sem->waking) > 0) {
- dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- inc(&sem->count);
- ret = -EINTR;
- }
- local_irq_restore(flags);
- return ret;
-}
-
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret = 1;
- unsigned long flags;
-
- local_irq_save(flags);
- if (read(&sem->waking) <= 0)
- inc(&sem->count);
- else {
- dec(&sem->waking);
- ret = 0;
- }
- local_irq_restore(flags);
- return ret;
-}
-
-#endif /* _ASM_SEMAPHORE_HELPER_H */
-
-
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 31a4ac448195..d9b2034ed1d2 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -1,133 +1 @@
-/* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */
-
-/* On the i386 these are coded in asm, perhaps we should as well. Later.. */
-
-#ifndef _CRIS_SEMAPHORE_H
-#define _CRIS_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * CRIS semaphores, implemented in C-only so far.
- */
-
-struct semaphore {
- atomic_t count;
- atomic_t waking;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .waking = ATOMIC_INIT(0), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/* notice - we probably can do cli/sti here instead of saving */
-
-static inline void down(struct semaphore * sem)
-{
- unsigned long flags;
- int failed;
-
- might_sleep();
-
- /* atomically decrement the semaphores count, and if its negative, we wait */
- cris_atomic_save(sem, flags);
- failed = --(sem->count.counter) < 0;
- cris_atomic_restore(sem, flags);
- if(failed) {
- __down(sem);
- }
-}
-
-/*
- * This version waits in interruptible state so that the waiting
- * process can be killed. The down_interruptible routine
- * returns negative for signalled and zero for semaphore acquired.
- */
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- unsigned long flags;
- int failed;
-
- might_sleep();
-
- /* atomically decrement the semaphores count, and if its negative, we wait */
- cris_atomic_save(sem, flags);
- failed = --(sem->count.counter) < 0;
- cris_atomic_restore(sem, flags);
- if(failed)
- failed = __down_interruptible(sem);
- return(failed);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- int failed;
-
- cris_atomic_save(sem, flags);
- failed = --(sem->count.counter) < 0;
- cris_atomic_restore(sem, flags);
- if(failed)
- failed = __down_trylock(sem);
- return(failed);
-
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- unsigned long flags;
- int wakeup;
-
- /* atomically increment the semaphores count, and if it was negative, we wake people */
- cris_atomic_save(sem, flags);
- wakeup = ++(sem->count.counter) <= 0;
- cris_atomic_restore(sem, flags);
- if(wakeup) {
- __up(sem);
- }
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index d7aaa1911a1a..d9b2034ed1d2 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -1,155 +1 @@
-/* semaphore.h: semaphores for the FR-V
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-/*
- * the semaphore definition
- * - if counter is >0 then there are tokens available on the semaphore for down to collect
- * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct semaphore {
- unsigned counter;
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_SEMAPHORE
- unsigned __magic;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
- unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (likely(sem->counter > 0)) {
- sem->counter--;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
- else {
- __down(sem, flags);
- }
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (likely(sem->counter > 0)) {
- sem->counter--;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
- else {
- ret = __down_interruptible(sem, flags);
- }
- return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int success = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->counter > 0) {
- sem->counter--;
- success = 1;
- }
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
- unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
- __up(sem);
- else
- sem->counter++;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
- return sem->counter;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-h8300/semaphore-helper.h b/include/asm-h8300/semaphore-helper.h
deleted file mode 100644
index 4fea36be5fd8..000000000000
--- a/include/asm-h8300/semaphore-helper.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _H8300_SEMAPHORE_HELPER_H
-#define _H8300_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * based on
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 1;
- if (sem->sleepers <= 0)
- atomic_inc(&sem->count);
- else {
- sem->sleepers--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index f3ffff83ff09..d9b2034ed1d2 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -1,190 +1 @@
-#ifndef _H8300_SEMAPHORE_H
-#define _H8300_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * H8/300 version by Yoshinori Sato
- */
-
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- might_sleep();
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r3l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %2, er1\n\t"
- "dec.l #1,er1\n\t"
- "mov.l er1,%0\n\t"
- "bpl 1f\n\t"
- "ldc r3l,ccr\n\t"
- "mov.l %1,er0\n\t"
- "jsr @___down\n\t"
- "bra 2f\n"
- "1:\n\t"
- "ldc r3l,ccr\n"
- "2:"
- : "=m"(*count)
- : "g"(sem),"m"(*count)
- : "cc", "er1", "er2", "er3");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- might_sleep();
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r1l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %3, er2\n\t"
- "dec.l #1,er2\n\t"
- "mov.l er2,%1\n\t"
- "bpl 1f\n\t"
- "ldc r1l,ccr\n\t"
- "mov.l %2,er0\n\t"
- "jsr @___down_interruptible\n\t"
- "bra 2f\n"
- "1:\n\t"
- "ldc r1l,ccr\n\t"
- "sub.l %0,%0\n\t"
- "2:\n\t"
- : "=r" (count),"=m" (*count)
- : "g"(sem),"m"(*count)
- : "cc", "er1", "er2", "er3");
- return (int)count;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r3l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %3,er2\n\t"
- "dec.l #1,er2\n\t"
- "mov.l er2,%0\n\t"
- "bpl 1f\n\t"
- "ldc r3l,ccr\n\t"
- "jmp @3f\n\t"
- LOCK_SECTION_START(".align 2\n\t")
- "3:\n\t"
- "mov.l %2,er0\n\t"
- "jsr @___down_trylock\n\t"
- "jmp @2f\n\t"
- LOCK_SECTION_END
- "1:\n\t"
- "ldc r3l,ccr\n\t"
- "sub.l %1,%1\n"
- "2:"
- : "=m" (*count),"=r"(count)
- : "g"(sem),"m"(*count)
- : "cc", "er1","er2", "er3");
- return (int)count;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- register atomic_t *count asm("er0");
-
- count = &(sem->count);
- __asm__ __volatile__(
- "stc ccr,r3l\n\t"
- "orc #0x80,ccr\n\t"
- "mov.l %2,er1\n\t"
- "inc.l #1,er1\n\t"
- "mov.l er1,%0\n\t"
- "ldc r3l,ccr\n\t"
- "sub.l er2,er2\n\t"
- "cmp.l er2,er1\n\t"
- "bgt 1f\n\t"
- "mov.l %1,er0\n\t"
- "jsr @___up\n"
- "1:"
- : "=m"(*count)
- : "g"(sem),"m"(*count)
- : "cc", "er1", "er2", "er3");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index d8393d11288d..d9b2034ed1d2 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
-#ifndef _ASM_IA64_SEMAPHORE_H
-#define _ASM_IA64_SEMAPHORE_H
-
-/*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void
-sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void
-init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void
-init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down (struct semaphore * sem);
-extern int __down_interruptible (struct semaphore * sem);
-extern int __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void
-down (struct semaphore *sem)
-{
- might_sleep();
- if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
- __down(sem);
-}
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_INTERRUPTIBLE state.
- */
-static inline int
-down_interruptible (struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int
-down_trylock (struct semaphore *sem)
-{
- int ret = 0;
-
- if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
- ret = __down_trylock(sem);
- return ret;
-}
-
-static inline void
-up (struct semaphore * sem)
-{
- if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
- __up(sem);
-}
-
-#endif /* _ASM_IA64_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index b5bf95a6f2b4..d9b2034ed1d2 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
-#ifndef _ASM_M32R_SEMAPHORE_H
-#define _ASM_M32R_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <asm/assembler.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
- */
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- __down(sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
- int result = 0;
-
- might_sleep();
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- result = __down_interruptible(sem);
-
- return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- long count;
- int result = 0;
-
- local_irq_save(flags);
- __asm__ __volatile__ (
- "# down_trylock \n\t"
- DCACHE_CLEAR("%0", "r4", "%1")
- M32R_LOCK" %0, @%1; \n\t"
- "addi %0, #-1; \n\t"
- M32R_UNLOCK" %0, @%1; \n\t"
- : "=&r" (count)
- : "r" (&sem->count)
- : "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
- , "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
- );
- local_irq_restore(flags);
-
- if (unlikely(count < 0))
- result = __down_trylock(sem);
-
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_M32R_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h
deleted file mode 100644
index eef30ba0b499..000000000000
--- a/include/asm-m68k/semaphore-helper.h
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-#ifndef CONFIG_RMW_INSNS
-extern spinlock_t semaphore_wake_lock;
-#endif
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
-#ifndef CONFIG_RMW_INSNS
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
- int tmp1, tmp2;
-
- __asm__ __volatile__
- ("1: movel %1,%2\n"
- " jle 2f\n"
- " subql #1,%2\n"
- " casl %1,%2,%3\n"
- " jne 1b\n"
- " moveq #1,%0\n"
- "2:"
- : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
- : "m" (sem->waking), "0" (0), "1" (sem->waking));
-#endif
-
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret;
-#ifndef CONFIG_RMW_INSNS
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
- int tmp1, tmp2;
-
- __asm__ __volatile__
- ("1: movel %1,%2\n"
- " jle 2f\n"
- " subql #1,%2\n"
- " casl %1,%2,%3\n"
- " jne 1b\n"
- " moveq #1,%0\n"
- " jra %a4\n"
- "2:"
- : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
- : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
- if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
-next:
-#endif
-
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret;
-#ifndef CONFIG_RMW_INSNS
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 1;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 0;
- } else
- atomic_inc(&sem->count);
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
- int tmp1, tmp2;
-
- __asm__ __volatile__
- ("1: movel %1,%2\n"
- " jle 2f\n"
- " subql #1,%2\n"
- " casl %1,%2,%3\n"
- " jne 1b\n"
- " moveq #0,%0\n"
- "2:"
- : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
- : "m" (sem->waking), "0" (1), "1" (sem->waking));
- if (ret)
- atomic_inc(&sem->count);
-#endif
- return ret;
-}
-
-#endif
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
index 64d6b119bb0a..d9b2034ed1d2 100644
--- a/include/asm-m68k/semaphore.h
+++ b/include/asm-m68k/semaphore.h
@@ -1,163 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
- atomic_t count;
- atomic_t waking;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .waking = ATOMIC_INIT(0), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
- might_sleep();
- __asm__ __volatile__(
- "| atomic down operation\n\t"
- "subql #1,%0@\n\t"
- "jmi 2f\n\t"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\tpea 1b\n\t"
- "jbra __down_failed\n"
- LOCK_SECTION_END
- : /* no outputs */
- : "a" (sem1)
- : "memory");
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
- register int result __asm__ ("%d0");
-
- might_sleep();
- __asm__ __volatile__(
- "| atomic interruptible down operation\n\t"
- "subql #1,%1@\n\t"
- "jmi 2f\n\t"
- "clrl %0\n"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\tpea 1b\n\t"
- "jbra __down_failed_interruptible\n"
- LOCK_SECTION_END
- : "=d" (result)
- : "a" (sem1)
- : "memory");
- return result;
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
- register int result __asm__ ("%d0");
-
- __asm__ __volatile__(
- "| atomic down trylock operation\n\t"
- "subql #1,%1@\n\t"
- "jmi 2f\n\t"
- "clrl %0\n"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\tpea 1b\n\t"
- "jbra __down_failed_trylock\n"
- LOCK_SECTION_END
- : "=d" (result)
- : "a" (sem1)
- : "memory");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
-
- __asm__ __volatile__(
- "| atomic up operation\n\t"
- "addql #1,%0@\n\t"
- "jle 2f\n"
- "1:\n"
- LOCK_SECTION_START(".even\n\t")
- "2:\t"
- "pea 1b\n\t"
- "jbra __up_wakeup\n"
- LOCK_SECTION_END
- : /* no outputs */
- : "a" (sem1)
- : "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-m68knommu/semaphore-helper.h b/include/asm-m68knommu/semaphore-helper.h
deleted file mode 100644
index 43da7bc483c7..000000000000
--- a/include/asm-m68knommu/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 0;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- ret = 1;
- if (atomic_read(&sem->waking) > 0) {
- atomic_dec(&sem->waking);
- ret = 0;
- } else
- atomic_inc(&sem->count);
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index 5779eb6c0689..d9b2034ed1d2 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -1,153 +1 @@
-#ifndef _M68K_SEMAPHORE_H
-#define _M68K_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-
-struct semaphore {
- atomic_t count;
- atomic_t waking;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .waking = ATOMIC_INIT(0), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- __asm__ __volatile__(
- "| atomic down operation\n\t"
- "movel %0, %%a1\n\t"
- "lea %%pc@(1f), %%a0\n\t"
- "subql #1, %%a1@\n\t"
- "jmi __down_failed\n"
- "1:"
- : /* no outputs */
- : "g" (sem)
- : "cc", "%a0", "%a1", "memory");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret;
-
- might_sleep();
- __asm__ __volatile__(
- "| atomic down operation\n\t"
- "movel %1, %%a1\n\t"
- "lea %%pc@(1f), %%a0\n\t"
- "subql #1, %%a1@\n\t"
- "jmi __down_failed_interruptible\n\t"
- "clrl %%d0\n"
- "1: movel %%d0, %0\n"
- : "=d" (ret)
- : "g" (sem)
- : "cc", "%d0", "%a0", "%a1", "memory");
- return(ret);
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- register struct semaphore *sem1 __asm__ ("%a1") = sem;
- register int result __asm__ ("%d0");
-
- __asm__ __volatile__(
- "| atomic down trylock operation\n\t"
- "subql #1,%1@\n\t"
- "jmi 2f\n\t"
- "clrl %0\n"
- "1:\n"
- ".section .text.lock,\"ax\"\n"
- ".even\n"
- "2:\tpea 1b\n\t"
- "jbra __down_failed_trylock\n"
- ".previous"
- : "=d" (result)
- : "a" (sem1)
- : "memory");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- __asm__ __volatile__(
- "| atomic up operation\n\t"
- "movel %0, %%a1\n\t"
- "lea %%pc@(1f), %%a0\n\t"
- "addql #1, %%a1@\n\t"
- "jle __up_wakeup\n"
- "1:"
- : /* no outputs */
- : "g" (sem)
- : "cc", "%a0", "%a1", "memory");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index fdf8042b784b..d9b2034ed1d2 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,108 +1 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 1998, 99, 2000, 01, 04 Ralf Baechle
- * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
- *
- * In all honesty, little of the old MIPS code left - the PPC64 variant was
- * just looking nice and portable so I ripped it. Credits to whoever wrote
- * it.
- */
-#ifndef __ASM_SEMAPHORE_H
-#define __ASM_SEMAPHORE_H
-
-/*
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- /*
- * Note that any negative value of count is equivalent to 0,
- * but additionally indicates that some process(es) might be
- * sleeping on `wait'.
- */
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
-
- /*
- * Try to get the semaphore, take the slow path if we fail.
- */
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
-
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h
index 5a9e1ad0b253..d9b2034ed1d2 100644
--- a/include/asm-mn10300/semaphore.h
+++ b/include/asm-mn10300/semaphore.h
@@ -1,169 +1 @@
-/* MN10300 Semaphores
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#define SEMAPHORE_DEBUG 0
-
-/*
- * the semaphore definition
- * - if count is >0 then there are tokens available on the semaphore for down
- * to collect
- * - if count is <=0 then there are no spare tokens, and anyone that wants one
- * must wait
- * - if wait_list is not empty, then there are processes waiting for the
- * semaphore
- */
-struct semaphore {
- atomic_t count; /* it's not really atomic, it's
- * just that certain modules
- * expect to be able to access
- * it directly */
- spinlock_t wait_lock;
- struct list_head wait_list;
-#if SEMAPHORE_DEBUG
- unsigned __magic;
-#endif
-};
-
-#if SEMAPHORE_DEBUG
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name, init_count) \
-{ \
- .count = ATOMIC_INIT(init_count), \
- .wait_lock = __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- .wait_list = LIST_HEAD_INIT((name).wait_list) \
- __SEM_DEBUG_INIT(name) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
- *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
- unsigned long flags;
- int count;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- count = atomic_read(&sem->count);
- if (likely(count > 0)) {
- atomic_set(&sem->count, count - 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- } else {
- __down(sem, flags);
- }
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
- unsigned long flags;
- int count, ret = 0;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- count = atomic_read(&sem->count);
- if (likely(count > 0)) {
- atomic_set(&sem->count, count - 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- } else {
- ret = __down_interruptible(sem, flags);
- }
- return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int count, success = 0;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- count = atomic_read(&sem->count);
- if (likely(count > 0)) {
- atomic_set(&sem->count, count - 1);
- success = 1;
- }
- spin_unlock_irqrestore(&sem->wait_lock, flags);
- return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
- unsigned long flags;
-
-#if SEMAPHORE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- spin_lock_irqsave(&sem->wait_lock, flags);
- if (!list_empty(&sem->wait_list))
- __up(sem);
- else
- atomic_set(&sem->count, atomic_read(&sem->count) + 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
- return atomic_read(&sem->count);
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h
deleted file mode 100644
index 387f7c1277a2..000000000000
--- a/include/asm-parisc/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _ASM_PARISC_SEMAPHORE_HELPER_H
-#define _ASM_PARISC_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->waking);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking <= 0)
- atomic_inc(&sem->count);
- else {
- sem->waking--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */
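
The helpers above were only half of the picture: an architecture's out-of-line
__down() consumed wakeup tokens from its sleep loop, while up() produced them
through wake_one_more(). A rough, hedged sketch of that classic consumer loop
follows; it is a reconstruction for illustration, assuming a semaphore with a
wait-queue head named `wait', and is not the literal parisc code (the parisc
struct semaphore removed later in this patch has no `waking' member, so this
helper appears to have been unused there).

/* Hedged sketch of the classic helper-based sleeper; illustration, not removed code. */
void __down(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&sem->wait, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waking_non_zero(sem))	/* consume a token produced by wake_one_more() */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&sem->wait, &wait);
}
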
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index a16271cdc748..d9b2034ed1d2 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -1,145 +1 @@
-/* SMP- and interrupt-safe semaphores.
- * PA-RISC version by Matthew Wilcox
- *
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org >
- * Copyright (C) 2000 Grant Grundler < grundler a debian org >
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_PARISC_SEMAPHORE_H
-#define _ASM_PARISC_SEMAPHORE_H
-
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-
-/*
- * The `count' is initialised to the number of people who are allowed to
- * take the lock. (Normally we want a mutex, so this is `1'). If
- * `count' is positive, the lock can be taken. If it's 0, no-one is
- * waiting on it. If it's -1, at least one task is waiting.
- */
-struct semaphore {
- spinlock_t sentry;
- int count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .sentry = SPIN_LOCK_UNLOCKED, \
- .count = n, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
- return sem->count;
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/* Semaphores can be `tried' from irq context. So we have to disable
- * interrupts while we're messing with the semaphore. Sorry.
- */
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- spin_lock_irq(&sem->sentry);
- if (sem->count > 0) {
- sem->count--;
- } else {
- __down(sem);
- }
- spin_unlock_irq(&sem->sentry);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
- might_sleep();
- spin_lock_irq(&sem->sentry);
- if (sem->count > 0) {
- sem->count--;
- } else {
- ret = __down_interruptible(sem);
- }
- spin_unlock_irq(&sem->sentry);
- return ret;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- * May not sleep, but must preserve irq state
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- unsigned long flags;
- int count;
-
- spin_lock_irqsave(&sem->sentry, flags);
- count = sem->count - 1;
- if (count >= 0)
- sem->count = count;
- spin_unlock_irqrestore(&sem->sentry, flags);
- return (count < 0);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sem->sentry, flags);
- if (sem->count < 0) {
- __up(sem);
- } else {
- sem->count++;
- }
- spin_unlock_irqrestore(&sem->sentry, flags);
-}
-
-#endif /* _ASM_PARISC_SEMAPHORE_H */
+#include <linux/semaphore.h>
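
The comment above stresses that this down_trylock() preserves IRQ state
because semaphores can be `tried' from interrupt context, and that it returns
0 on success. A hedged usage sketch of that convention; the handler and
semaphore names are hypothetical, and <linux/interrupt.h> is assumed for the
irqreturn_t types:

/* Hypothetical example: non-blocking acquire from a context that must not sleep. */
static DECLARE_MUTEX(example_sem);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	if (down_trylock(&example_sem))
		return IRQ_NONE;	/* non-zero return means the semaphore was contended */

	/* ... touch the state protected by example_sem ... */
	up(&example_sem);
	return IRQ_HANDLED;
}
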
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
index 48dd32e07749..d9b2034ed1d2 100644
--- a/include/asm-powerpc/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,94 +1 @@
-#ifndef _ASM_POWERPC_SEMAPHORE_H
-#define _ASM_POWERPC_SEMAPHORE_H
-
-/*
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- /*
- * Note that any negative value of count is equivalent to 0,
- * but additionally indicates that some process(es) might be
- * sleeping on `wait'.
- */
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
-
- /*
- * Try to get the semaphore, take the slow path if we fail.
- */
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
-
- if (unlikely(atomic_dec_return(&sem->count) < 0))
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
- if (unlikely(atomic_inc_return(&sem->count) <= 0))
- __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_POWERPC_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 0e7001ad8392..d9b2034ed1d2 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -1,107 +1 @@
-/*
- * include/asm-s390/semaphore.h
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *
- * Derived from "include/asm-i386/semaphore.h"
- * (C) Copyright 1996 Linus Torvalds
- */
-
-#ifndef _S390_SEMAPHORE_H
-#define _S390_SEMAPHORE_H
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- /*
- * Note that any negative value of count is equivalent to 0,
- * but additionally indicates that some process(es) might be
- * sleeping on `wait'.
- */
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
- { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- int old_val, new_val;
-
- /*
- * This inline assembly atomically implements the equivalent
- * to the following C code:
- * old_val = sem->count.counter;
- * if ((new_val = old_val) > 0)
- * sem->count.counter = --new_val;
- * In the ppc code this is called atomic_dec_if_positive.
- */
- asm volatile(
- " l %0,0(%3)\n"
- "0: ltr %1,%0\n"
- " jle 1f\n"
- " ahi %1,-1\n"
- " cs %0,%1,0(%3)\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
- : "a" (&sem->count.counter), "m" (sem->count.counter)
- : "cc", "memory");
- return old_val <= 0;
-}
-
-static inline void up(struct semaphore * sem)
-{
- if (atomic_inc_return(&sem->count) <= 0)
- __up(sem);
-}
-
-#endif
+#include <linux/semaphore.h>
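
The s390 down_trylock() above open-codes, in assembly, what its own comment
calls atomic_dec_if_positive. For readers who do not read s390 assembly, here
is a hedged C rendering of the same compare-and-swap loop; it is illustrative
only and not code from the patch:

/* Hedged C equivalent of the cs-loop in the removed s390 down_trylock(). */
static inline int example_trylock_dec_if_positive(struct semaphore *sem)
{
	int old_val, new_val;

	do {
		old_val = atomic_read(&sem->count);
		if (old_val <= 0)
			break;			/* nothing to take; leave count alone */
		new_val = old_val - 1;
	} while (atomic_cmpxchg(&sem->count, old_val, new_val) != old_val);

	return old_val <= 0;			/* non-zero: trylock failed */
}
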
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h
deleted file mode 100644
index bd8230c369ca..000000000000
--- a/include/asm-sh/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef __ASM_SH_SEMAPHORE_HELPER_H
-#define __ASM_SH_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers <= 0)
- atomic_inc(&sem->count);
- else {
- sem->sleepers--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 9e5a37c4dce2..d9b2034ed1d2 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -1,115 +1 @@
-#ifndef __ASM_SH_SEMAPHORE_H
-#define __ASM_SH_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * SuperH version by Niibe Yutaka
- * (Currently no asm implementation but generic C code...)
- */
-
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/wait.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-#if 0
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-#endif
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- int ret = 0;
-
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_trylock(sem);
- return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
- if (atomic_inc_return(&sem->count) <= 0)
- __up(sem);
-}
-
-#endif
-#endif /* __ASM_SH_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 8018f9f4d497..d9b2034ed1d2 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -1,192 +1 @@
-#ifndef _SPARC_SEMAPHORE_H
-#define _SPARC_SEMAPHORE_H
-
-/* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic24_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC24_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic24_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static inline void down(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- might_sleep();
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_sub\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "bl 2f\n\t"
- " nop\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "ba 1b\n\t"
- " restore %%l5, %%g0, %%g5\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__down)
- : "g3", "g4", "g7", "memory", "cc");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- might_sleep();
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_sub\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "bl 2f\n\t"
- " clr %%g2\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "mov %%l5, %%g5\n\t"
- "ba 1b\n\t"
- " restore %%o0, %%g0, %%g2\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__down_interruptible)
- : "g3", "g4", "g7", "memory", "cc");
-
- return increment;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_sub\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "bl 2f\n\t"
- " clr %%g2\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "mov %%l5, %%g5\n\t"
- "ba 1b\n\t"
- " restore %%o0, %%g0, %%g2\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__down_trylock)
- : "g3", "g4", "g7", "memory", "cc");
-
- return increment;
-}
-
-static inline void up(struct semaphore * sem)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
-
- ptr = &(sem->count.counter);
- increment = 1;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_add\n\t"
- " add %%o7, 8, %%o7\n\t"
- "tst %%g2\n\t"
- "ble 2f\n\t"
- " nop\n"
- "1:\n\t"
- ".subsection 2\n"
- "2:\n\t"
- "save %%sp, -64, %%sp\n\t"
- "mov %%g1, %%l1\n\t"
- "mov %%g5, %%l5\n\t"
- "call %3\n\t"
- " mov %%g1, %%o0\n\t"
- "mov %%l1, %%g1\n\t"
- "ba 1b\n\t"
- " restore %%l5, %%g0, %%g5\n\t"
- ".previous\n"
- : "=&r" (increment)
- : "0" (increment), "r" (ptr), "i" (__up)
- : "g3", "g4", "g7", "memory", "cc");
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_SPARC_SEMAPHORE_H) */
+#include <linux/semaphore.h>
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 7f7c0c4e024f..d9b2034ed1d2 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -1,53 +1 @@
-#ifndef _SPARC64_SEMAPHORE_H
-#define _SPARC64_SEMAPHORE_H
-
-/* These are actually reasonable on the V9.
- *
- * See asm-ppc/semaphore.h for implementation commentary,
- * only sparc64 specific issues are commented here.
- */
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, count) \
- { ATOMIC_INIT(count), \
- __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern void up(struct semaphore *sem);
-extern void down(struct semaphore *sem);
-extern int down_trylock(struct semaphore *sem);
-extern int down_interruptible(struct semaphore *sem);
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_SPARC64_SEMAPHORE_H) */
+#include <linux/semaphore.h>
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h
index ff13c34de421..d9b2034ed1d2 100644
--- a/include/asm-um/semaphore.h
+++ b/include/asm-um/semaphore.h
@@ -1,6 +1 @@
-#ifndef __UM_SEMAPHORE_H
-#define __UM_SEMAPHORE_H
-
-#include "asm/arch/semaphore.h"
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
index 10ed0ccf37df..d9b2034ed1d2 100644
--- a/include/asm-v850/semaphore.h
+++ b/include/asm-v850/semaphore.h
@@ -1,84 +1 @@
-#ifndef __V850_SEMAPHORE_H__
-#define __V850_SEMAPHORE_H__
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
- { ATOMIC_INIT (count), 0, \
- __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER (name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init (sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init (sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed (void);
-asmlinkage int __down_interruptible_failed (void);
-asmlinkage int __down_trylock_failed (void);
-asmlinkage void __up_wakeup (void);
-
-extern void __down (struct semaphore * sem);
-extern int __down_interruptible (struct semaphore * sem);
-extern int __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-static inline void down (struct semaphore * sem)
-{
- might_sleep();
- if (atomic_dec_return (&sem->count) < 0)
- __down (sem);
-}
-
-static inline int down_interruptible (struct semaphore * sem)
-{
- int ret = 0;
- might_sleep();
- if (atomic_dec_return (&sem->count) < 0)
- ret = __down_interruptible (sem);
- return ret;
-}
-
-static inline int down_trylock (struct semaphore *sem)
-{
- int ret = 0;
- if (atomic_dec_return (&sem->count) < 0)
- ret = __down_trylock (sem);
- return ret;
-}
-
-static inline void up (struct semaphore * sem)
-{
- if (atomic_inc_return (&sem->count) <= 0)
- __up (sem);
-}
-
-#endif /* __V850_SEMAPHORE_H__ */
+#include <linux/semaphore.h>
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h
index 572c0b67a6b0..d9b2034ed1d2 100644
--- a/include/asm-x86/semaphore.h
+++ b/include/asm-x86/semaphore.h
@@ -1,5 +1 @@
-#ifdef CONFIG_X86_32
-# include "semaphore_32.h"
-#else
-# include "semaphore_64.h"
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h
deleted file mode 100644
index ac96d3804d0c..000000000000
--- a/include/asm-x86/semaphore_32.h
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef _I386_SEMAPHORE_H
-#define _I386_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- * the original code and to make semaphore waits
- * interruptible so that processes waiting on
- * semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- * functions in asm/sempahore-helper.h while fixing a
- * potential and subtle race discovered by Ulrich Schmid
- * in down_interruptible(). Since I started to play here I
- * also implemented the `trylock' semaphore operation.
- * 1999-07-02 Artur Skawina <skawina@geocities.com>
- * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- * do this). Changed calling sequences from push/jmp to
- * traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- * Some hacks to ensure compatibility with recent
- * GCC snapshots, to avoid stack corruption when compiling
- * with -fomit-frame-pointer. It's not clear whether this will
- * be fixed in GCC, as our previous implementation was a
- * bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
- */
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-extern asmregparm void __down_failed(atomic_t *count_ptr);
-extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr);
-extern asmregparm int __down_failed_trylock(atomic_t *count_ptr);
-extern asmregparm void __up_wakeup(atomic_t *count_ptr);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
- __asm__ __volatile__(
- "# atomic down operation\n\t"
- LOCK_PREFIX "decl %0\n\t" /* --sem->count */
- "jns 2f\n"
- "\tlea %0,%%eax\n\t"
- "call __down_failed\n"
- "2:"
- :"+m" (sem->count)
- :
- :"memory","ax");
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
- int result;
-
- might_sleep();
- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "lea %1,%%eax\n\t"
- "call __down_failed_interruptible\n"
- "2:"
- :"=&a" (result), "+m" (sem->count)
- :
- :"memory");
- return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- int result;
-
- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "lea %1,%%eax\n\t"
- "call __down_failed_trylock\n\t"
- "2:\n"
- :"=&a" (result), "+m" (sem->count)
- :
- :"memory");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
- __asm__ __volatile__(
- "# atomic up operation\n\t"
- LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
- "jg 1f\n\t"
- "lea %0,%%eax\n\t"
- "call __up_wakeup\n"
- "1:"
- :"+m" (sem->count)
- :
- :"memory","ax");
-}
-
-#endif
-#endif
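
The fast path in the removed i386 header is a single locked decl; only when
the count goes negative does execution fall through to __down_failed, the asm
stub that (as the comment above says) calls the C routine that actually waits,
presumably after saving the caller-clobbered registers, which is why the
inline asm lists only %eax as clobbered. In rough C terms, a hedged paraphrase
of that split, not a drop-in replacement for the asm:

/* Hedged C paraphrase of the asm fast/slow split above; illustration only. */
static inline void example_down(struct semaphore *sem)
{
	if (atomic_dec_return(&sem->count) >= 0)
		return;				/* uncontended: the common case stays inline */
	__down_failed(&sem->count);		/* asm stub: ends up in the C __down() that sleeps */
}
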
diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h
deleted file mode 100644
index 79694306bf7d..000000000000
--- a/include/asm-x86/semaphore_64.h
+++ /dev/null
@@ -1,180 +0,0 @@
-#ifndef _X86_64_SEMAPHORE_H
-#define _X86_64_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- * the original code and to make semaphore waits
- * interruptible so that processes waiting on
- * semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- * functions in asm/semaphore-helper.h while fixing a
- * potential and subtle race discovered by Ulrich Schmid
- * in down_interruptible(). Since I started to play here I
- * also implemented the `trylock' semaphore operation.
- * 1999-07-02 Artur Skawina <skawina@geocities.com>
- * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- * do this). Changed calling sequences from push/jmp to
- * traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- * Some hacks to ensure compatibility with recent
- * GCC snapshots, to avoid stack corruption when compiling
- * with -fomit-frame-pointer. It's not sure if this will
- * be fixed in GCC, as our previous implementation was a
- * bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
- */
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/x86_64/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
-
- __asm__ __volatile__(
- "# atomic down operation\n\t"
- LOCK_PREFIX "decl %0\n\t" /* --sem->count */
- "jns 1f\n\t"
- "call __down_failed\n"
- "1:"
- :"=m" (sem->count)
- :"D" (sem)
- :"memory");
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
- int result;
-
- might_sleep();
-
- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "call __down_failed_interruptible\n"
- "2:\n"
- :"=&a" (result), "=m" (sem->count)
- :"D" (sem)
- :"memory");
- return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
- int result;
-
- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "call __down_failed_trylock\n\t"
- "2:\n"
- :"=&a" (result), "=m" (sem->count)
- :"D" (sem)
- :"memory","cc");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
- __asm__ __volatile__(
- "# atomic up operation\n\t"
- LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
- "jg 1f\n\t"
- "call __up_wakeup\n"
- "1:"
- :"=m" (sem->count)
- :"D" (sem)
- :"memory");
-}
-#endif /* __KERNEL__ */
-#endif
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index 3e04167cd9dc..d9b2034ed1d2 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -1,99 +1 @@
-/*
- * linux/include/asm-xtensa/semaphore.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SEMAPHORE_H
-#define _XTENSA_SEMAPHORE_H
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
- atomic_t count;
- int sleepers;
- wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,n) \
-{ \
- .count = ATOMIC_INIT(n), \
- .sleepers = 0, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
- atomic_set(&sem->count, val);
- sem->sleepers = 0;
- init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
- sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
- sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-static inline void down(struct semaphore * sem)
-{
- might_sleep();
-
- if (atomic_sub_return(1, &sem->count) < 0)
- __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
-
- might_sleep();
-
- if (atomic_sub_return(1, &sem->count) < 0)
- ret = __down_interruptible(sem);
- return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
- int ret = 0;
-
- if (atomic_sub_return(1, &sem->count) < 0)
- ret = __down_trylock(sem);
- return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
- if (atomic_add_return(1, &sem->count) <= 0)
- __up(sem);
-}
-
-#endif /* _XTENSA_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
new file mode 100644
index 000000000000..b3c691b089b2
--- /dev/null
+++ b/include/linux/semaphore.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * Distributed under the terms of the GNU GPL, version 2
+ *
+ * Counting semaphores allow up to <n> tasks to acquire the semaphore
+ * simultaneously.
+ */
+#ifndef __LINUX_SEMAPHORE_H
+#define __LINUX_SEMAPHORE_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * The spinlock controls access to the other members of the semaphore.
+ * 'count' is decremented by every task which calls down*() and incremented
+ * by every call to up(). Thus, if it is positive, it indicates how many
+ * more tasks may acquire the lock. If it is negative, it indicates how
+ * many tasks are waiting for the lock. Tasks waiting for the lock are
+ * kept on the wait_list.
+ */
+struct semaphore {
+ spinlock_t lock;
+ int count;
+ struct list_head wait_list;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n) \
+{ \
+ .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+ .count = n, \
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+}
+
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
+
+static inline void sema_init(struct semaphore *sem, int val)
+{
+ static struct lock_class_key __key;
+ *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
+ lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
+}
+
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
+
+/*
+ * Attempt to acquire the semaphore. If another task is already holding the
+ * semaphore, sleep until the semaphore is released.
+ */
+extern void down(struct semaphore *sem);
+
+/*
+ * As down(), except the sleep may be interrupted by a signal. If it is,
+ * this function will return -EINTR.
+ */
+extern int __must_check down_interruptible(struct semaphore *sem);
+
+/*
+ * As down(), except this function will not sleep. It will return 0 if it
+ * acquired the semaphore and 1 if the semaphore was contended. This
+ * function may be called from any context, including interrupt and softirq.
+ */
+extern int __must_check down_trylock(struct semaphore *sem);
+
+/*
+ * Release the semaphore. Unlike mutexes, up() may be called from any
+ * context and even by tasks which have never called down().
+ */
+extern void up(struct semaphore *sem);
+
+#endif /* __LINUX_SEMAPHORE_H */
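
A hedged usage sketch of the generic API declared above; the semaphore and
function names are illustrative and not part of the patch, and <linux/errno.h>
is assumed for the error codes:

/* Illustration only: typical callers of the generic semaphore API above. */
static DECLARE_MUTEX(demo_sem);			/* counting semaphore initialised to 1 */

static int demo_claim(void)
{
	if (down_interruptible(&demo_sem))	/* may sleep; fails with -EINTR on a signal */
		return -EINTR;
	/* ... critical section ... */
	up(&demo_sem);				/* may be called from any context */
	return 0;
}

static int demo_poll(void)
{
	if (down_trylock(&demo_sem))		/* never sleeps; non-zero means contended */
		return -EBUSY;
	/* ... critical section ... */
	up(&demo_sem);
	return 0;
}
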