Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/atomic.h       |  2
-rw-r--r--  include/asm-alpha/cache.h        |  1
-rw-r--r--  include/asm-alpha/compiler.h     |  2
-rw-r--r--  include/asm-alpha/dma-mapping.h  |  2
-rw-r--r--  include/asm-alpha/futex.h        | 49
-rw-r--r--  include/asm-alpha/hardirq.h      |  2
-rw-r--r--  include/asm-alpha/mman.h         |  1
-rw-r--r--  include/asm-alpha/mutex.h        |  9
-rw-r--r--  include/asm-alpha/processor.h    | 21
9 files changed, 16 insertions(+), 73 deletions(-)
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 6183eab006d4..fc77f7413083 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -176,6 +176,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -216,4 +217,5 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#include <asm-generic/atomic.h>
#endif /* _ALPHA_ATOMIC_H */
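Note: the added atomic_xchg() simply forwards to xchg() on the counter field: atomically store a new value and hand back whatever was there before. A minimal user-space sketch of that semantics (illustrative names only, using the GCC __atomic builtins rather than the kernel's xchg()):

#include <stdio.h>

/* Stand-in for atomic_xchg(): atomically store new_val and return the
 * previous contents, with full memory ordering. */
static int example_atomic_xchg(int *counter, int new_val)
{
        return __atomic_exchange_n(counter, new_val, __ATOMIC_SEQ_CST);
}

int main(void)
{
        int v = 1;
        int old = example_atomic_xchg(&v, 0);
        printf("old=%d, now=%d\n", old, v);     /* prints "old=1, now=0" */
        return 0;
}

atomic_cmpxchg() on the line above it is the compare-and-swap counterpart; the new <asm-generic/atomic.h> include pulls in generic helpers layered on these primitives.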
diff --git a/include/asm-alpha/cache.h b/include/asm-alpha/cache.h
index e69b29501a5f..e6d4d1695e25 100644
--- a/include/asm-alpha/cache.h
+++ b/include/asm-alpha/cache.h
@@ -20,6 +20,5 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX L1_CACHE_SHIFT
#endif
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
index 0a4a8b40dfcd..00c6f57ad9a7 100644
--- a/include/asm-alpha/compiler.h
+++ b/include/asm-alpha/compiler.h
@@ -98,9 +98,7 @@
#undef inline
#undef __inline__
#undef __inline
-#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
-#endif
#endif /* __ALPHA_COMPILER_H */
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index 680f7ecbb28f..9dc7256cf979 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -16,7 +16,7 @@
#define dma_free_coherent(dev, size, va, addr) \
pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr)
#define dma_map_page(dev, page, off, size, dir) \
- pci_map_single(alpha_gendev_to_pci(dev), page, off, size, dir)
+ pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir)
#define dma_unmap_page(dev, addr, size, dir) \
pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir)
#define dma_map_sg(dev, sg, nents, dir) \
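Note: the one-line dma_map_page() change is a real bug fix, because the two PCI helpers take different arguments: pci_map_single() maps a kernel virtual address, while pci_map_page() maps a (struct page, offset) pair, which is exactly what dma_map_page() callers pass. A declaration-only sketch of the two shapes (stand-in types so it compiles on its own; the real prototypes live in the kernel's PCI headers), shown only to illustrate the mismatch the old macro had:

/* Illustrative stand-ins, not the kernel's definitions. */
typedef unsigned long dma_addr_t;
struct pci_dev;
struct page;

/* Maps a kernel *virtual address* -- not what dma_map_page() callers pass. */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
                          size_t size, int direction);

/* Maps a (struct page, offset) pair -- the correct counterpart. */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
                        unsigned long offset, size_t size, int direction);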
diff --git a/include/asm-alpha/futex.h b/include/asm-alpha/futex.h
index 9feff4ce1424..6a332a9f099c 100644
--- a/include/asm-alpha/futex.h
+++ b/include/asm-alpha/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
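Note: the stub being deleted here decodes the packed encoded_op word the same way the generic replacement does: the op in bits 28-30 (bit 31 flags a shifted oparg), the comparison in bits 24-27, and two sign-extended 12-bit arguments below that. A small user-space restatement of that unpacking, kept only as a reading aid for the removed code above:

#include <stdio.h>

/* Restatement of the unpacking done by the removed stub:
 * bit 31       -> "oparg is a shift count" flag (FUTEX_OP_OPARG_SHIFT)
 * bits 28..30  -> op   (SET/ADD/OR/ANDN/XOR)
 * bits 24..27  -> cmp  (EQ/NE/LT/GE/LE/GT)
 * bits 12..23  -> oparg,  sign-extended
 * bits  0..11  -> cmparg, sign-extended
 */
static void decode_futex_op(int encoded_op)
{
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;

        if (encoded_op & (8 << 28))     /* FUTEX_OP_OPARG_SHIFT flag */
                oparg = 1 << oparg;

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}

int main(void)
{
        /* op=1 (FUTEX_OP_ADD), cmp=0 (FUTEX_OP_CMP_EQ), oparg=5, cmparg=0 */
        decode_futex_op((1 << 28) | (0 << 24) | (5 << 12) | 0);
        return 0;
}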
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index c0593f9b21e1..7bb6a36c96a1 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -13,6 +13,8 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+void ack_bad_irq(unsigned int irq);
+
#define HARDIRQ_BITS 12
/*
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h
index eb9c279045ef..f6439532a262 100644
--- a/include/asm-alpha/mman.h
+++ b/include/asm-alpha/mman.h
@@ -42,6 +42,7 @@
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_SPACEAVAIL 5 /* ensure resources are available */
#define MADV_DONTNEED 6 /* don't need these pages */
+#define MADV_REMOVE 7 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
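Note: MADV_REMOVE tells the kernel to drop the given pages and the backing store beneath them (hole punching on shmem/tmpfs-style backings), not merely to forget page contents as MADV_DONTNEED does. A small user-space sketch, assuming a glibc that exposes MADV_REMOVE and a mapping whose backing supports it (the call fails with EOPNOTSUPP otherwise):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        size_t len = 4 * (size_t)pg;

        /* Shared anonymous memory is shmem-backed, which supports MADV_REMOVE. */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        memset(p, 'x', len);

        /* Free the first page *and* its backing store. */
        if (madvise(p, (size_t)pg, MADV_REMOVE) != 0)
                perror("madvise(MADV_REMOVE)");

        /* The hole reads back as zeroes; the rest is untouched. */
        printf("p[0]=%d p[pg]=%c\n", p[0], p[pg]);

        munmap(p, len);
        return 0;
}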
diff --git a/include/asm-alpha/mutex.h b/include/asm-alpha/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-alpha/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
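Note: the TODO above leaves the choice between the two generic fastpaths open. asm-generic/mutex-dec.h takes the lock by atomically decrementing a count from 1 and calling the slowpath if the result goes negative; asm-generic/mutex-xchg.h swaps in 0 and calls the slowpath if the old value was not 1. A rough user-space sketch of the two ideas (hypothetical names, C11 atomics in place of the kernel's atomic_t; the real headers also handle waiters and unlock hand-off):

#include <stdatomic.h>
#include <stdio.h>

/* count == 1: unlocked, 0: locked, negative: locked with waiters. */
static atomic_int count = 1;

static void slowpath(void)      /* stand-in for the contended path */
{
        puts("contended: would block in the slowpath");
}

/* mutex-dec.h style: decrement; a negative result means contention. */
static void lock_dec_style(void)
{
        if (atomic_fetch_sub(&count, 1) - 1 < 0)
                slowpath();
}

/* mutex-xchg.h style: swap in "locked"; a non-1 old value means contention. */
static void lock_xchg_style(void)
{
        if (atomic_exchange(&count, 0) != 1)
                slowpath();
}

int main(void)
{
        /* A real mutex uses one style consistently; this just exercises
         * both contention checks against the same counter. */
        lock_xchg_style();      /* count was 1 -> lock taken              */
        lock_dec_style();       /* count now 0 -> goes negative, slowpath */
        return 0;
}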
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index 059780a7d3d7..bb1a7a3abb8b 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -77,7 +77,6 @@ unsigned long get_wchan(struct task_struct *p);
#define spin_lock_prefetch(lock) do { } while (0)
#endif
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
extern inline void prefetch(const void *ptr)
{
__builtin_prefetch(ptr, 0, 3);
@@ -95,24 +94,4 @@ extern inline void spin_lock_prefetch(const void *ptr)
}
#endif
-#else
-extern inline void prefetch(const void *ptr)
-{
- __asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
-}
-
-extern inline void prefetchw(const void *ptr)
-{
- __asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
-}
-
-#ifdef CONFIG_SMP
-extern inline void spin_lock_prefetch(const void *ptr)
-{
- __asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
-}
-#endif
-
-#endif /* GCC 3.1 */
-
#endif /* __ASM_ALPHA_PROCESSOR_H */
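Note: with the pre-GCC-3.1 fallbacks gone, the prefetch hints above reduce to __builtin_prefetch(ptr, rw, locality): rw is 0 for read and 1 for write, and locality runs from 0 (no temporal reuse) to 3 (keep in all cache levels), matching the (ptr, 0, 3) call shown in the first hunk. A small stand-alone example of the same builtin (illustrative loop, not kernel code):

#include <stdio.h>

/* Sum an array, hinting the data a few iterations ahead of use.
 * The builtin is purely a hint and compiles to nothing on targets
 * without a prefetch instruction. */
static long sum(const long *a, size_t n)
{
        long s = 0;
        for (size_t i = 0; i < n; i++) {
                if (i + 8 < n)
                        __builtin_prefetch(&a[i + 8], 0, 3);    /* read, high reuse */
                s += a[i];
        }
        return s;
}

int main(void)
{
        long a[64];
        for (size_t i = 0; i < 64; i++)
                a[i] = (long)i;
        printf("%ld\n", sum(a, 64));    /* prints 2016 */
        return 0;
}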