Diffstat (limited to 'include/asm-alpha')
-rw-r--r--   include/asm-alpha/bitops.h        133
-rw-r--r--   include/asm-alpha/fpu.h             4
-rw-r--r--   include/asm-alpha/io.h             84
-rw-r--r--   include/asm-alpha/mmu_context.h     5
-rw-r--r--   include/asm-alpha/mmzone.h         19
-rw-r--r--   include/asm-alpha/page.h            4
-rw-r--r--   include/asm-alpha/poll.h            4
-rw-r--r--   include/asm-alpha/topology.h        4
8 files changed, 24 insertions, 233 deletions
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 302201f1a097..3f88715e811e 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -261,7 +261,7 @@ static inline unsigned long ffz_b(unsigned long x)
static inline unsigned long ffz(unsigned long word)
{
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
return __kernel_cttz(~word);
#else
@@ -281,7 +281,7 @@ static inline unsigned long ffz(unsigned long word)
*/
static inline unsigned long __ffs(unsigned long word)
{
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
return __kernel_cttz(word);
#else
@@ -313,20 +313,20 @@ static inline int ffs(int word)
/*
* fls: find last bit set.
*/
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls(int word)
{
return 64 - __kernel_ctlz(word & 0xffffffff);
}
#else
-#define fls generic_fls
+#include <asm-generic/bitops/fls.h>
#endif
-#define fls64 generic_fls64
+#include <asm-generic/bitops/fls64.h>
/* Compute powers of two for the given integer. */
static inline long floor_log2(unsigned long word)
{
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
return 63 - __kernel_ctlz(word);
#else
long bit;
@@ -347,7 +347,7 @@ static inline long ceil_log2(unsigned long word)
* of bits set) of a N-bit word
*/
-#if defined(__alpha_cix__) && defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
static inline unsigned long hweight64(unsigned long w)
{
@@ -358,112 +358,12 @@ static inline unsigned long hweight64(unsigned long w)
#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
#else
-static inline unsigned long hweight64(unsigned long w)
-{
- unsigned long result;
- for (result = 0; w ; w >>= 1)
- result += (w & 1);
- return result;
-}
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
#endif
#endif /* __KERNEL__ */
-/*
- * Find next zero bit in a bitmap reasonably efficiently..
- */
-static inline unsigned long
-find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- p += offset >> 6;
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (64-offset);
- if (size < 64)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
- found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
- found_middle:
- return result + ffz(tmp);
-}
-
-/*
- * Find next one bit in a bitmap reasonably efficiently.
- */
-static inline unsigned long
-find_next_bit(const void * addr, unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- unsigned long result = offset & ~63UL;
- unsigned long tmp;
-
- p += offset >> 6;
- if (offset >= size)
- return size;
- size -= result;
- offset &= 63UL;
- if (offset) {
- tmp = *(p++);
- tmp &= ~0UL << offset;
- if (size < 64)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 64;
- result += 64;
- }
- while (size & ~63UL) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += 64;
- size -= 64;
- }
- if (!size)
- return result;
- tmp = *p;
- found_first:
- tmp &= ~0UL >> (64 - size);
- if (!tmp)
- return result + size;
- found_middle:
- return result + __ffs(tmp);
-}
-
-/*
- * The optimizer actually does good code for this case.
- */
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
+#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
@@ -487,21 +387,12 @@ sched_find_first_bit(unsigned long b[3])
return __ffs(b0) + ofs;
}
+#include <asm-generic/bitops/ext2-non-atomic.h>
-#define ext2_set_bit __test_and_set_bit
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit __test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
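
For reference, a standalone sketch of the identity the EV67 paths above rely on: the lowest clear bit of a word is the count of trailing zeros of its complement. GCC's __builtin_ctzll stands in for __kernel_cttz here, and a 64-bit unsigned long is assumed (as on Alpha); this is an illustration, not kernel code.

/* ffz(word) == cttz(~word), checked against a naive bit walk. */
#include <assert.h>
#include <stdio.h>

static unsigned long ffz_portable(unsigned long word)
{
	unsigned long result = 0;
	while (word & 1) {		/* skip over the low set bits */
		word >>= 1;
		result++;
	}
	return result;
}

static unsigned long ffz_cttz(unsigned long word)
{
	return (unsigned long)__builtin_ctzll(~word);	/* undefined for word == ~0UL */
}

int main(void)
{
	unsigned long samples[] = { 0x0, 0x1, 0xff, 0xfffffffffffffffeUL };
	for (int i = 0; i < 4; i++)
		assert(ffz_portable(samples[i]) == ffz_cttz(samples[i]));
	puts("ffz variants agree");
	return 0;
}
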
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
index c203fc2fa5cd..ecb17a72acc3 100644
--- a/include/asm-alpha/fpu.h
+++ b/include/asm-alpha/fpu.h
@@ -130,7 +130,7 @@ rdfpcr(void)
{
unsigned long tmp, ret;
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
__asm__ __volatile__ (
"ftoit $f0,%0\n\t"
"mf_fpcr $f0\n\t"
@@ -154,7 +154,7 @@ wrfpcr(unsigned long val)
{
unsigned long tmp;
-#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
__asm__ __volatile__ (
"ftoit $f0,%0\n\t"
"itoft %1,$f0\n\t"
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 871dd7ad909d..3ebbeee753e9 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -534,9 +534,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#define eth_io_copy_and_sum(skb,src,len,unused) \
memcpy_fromio((skb)->data,src,len)
-#define isa_eth_io_copy_and_sum(skb,src,len,unused) \
- isa_memcpy_fromio((skb)->data,src,len)
-
static inline int
check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length)
@@ -550,87 +547,6 @@ check_signature(const volatile void __iomem *io_addr,
return 1;
}
-
-/*
- * ISA space is mapped to some machine-specific location on Alpha.
- * Call into the existing hooks to get the address translated.
- */
-
-static inline u8
-isa_readb(unsigned long offset)
-{
- void __iomem *addr = ioremap(offset, 1);
- u8 ret = readb(addr);
- iounmap(addr);
- return ret;
-}
-
-static inline u16
-isa_readw(unsigned long offset)
-{
- void __iomem *addr = ioremap(offset, 2);
- u16 ret = readw(addr);
- iounmap(addr);
- return ret;
-}
-
-static inline u32
-isa_readl(unsigned long offset)
-{
- void __iomem *addr = ioremap(offset, 2);
- u32 ret = readl(addr);
- iounmap(addr);
- return ret;
-}
-
-static inline void
-isa_writeb(u8 b, unsigned long offset)
-{
- void __iomem *addr = ioremap(offset, 2);
- writeb(b, addr);
- iounmap(addr);
-}
-
-static inline void
-isa_writew(u16 w, unsigned long offset)
-{
- void __iomem *addr = ioremap(offset, 2);
- writew(w, addr);
- iounmap(addr);
-}
-
-static inline void
-isa_writel(u32 l, unsigned long offset)
-{
- void __iomem *addr = ioremap(offset, 2);
- writel(l, addr);
- iounmap(addr);
-}
-
-static inline void
-isa_memset_io(unsigned long offset, u8 val, long n)
-{
- void __iomem *addr = ioremap(offset, n);
- memset_io(addr, val, n);
- iounmap(addr);
-}
-
-static inline void
-isa_memcpy_fromio(void *dest, unsigned long offset, long n)
-{
- void __iomem *addr = ioremap(offset, n);
- memcpy_fromio(dest, addr, n);
- iounmap(addr);
-}
-
-static inline void
-isa_memcpy_toio(unsigned long offset, const void *src, long n)
-{
- void __iomem *addr = ioremap(offset, n);
- memcpy_toio(addr, src, n);
- iounmap(addr);
-}
-
/*
* The Alpha Jensen hardware for some rather strange reason puts
* the RTC clock at 0x170 instead of 0x70. Probably due to some
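
With the one-shot isa_* wrappers removed, callers map the window themselves and reuse it. A hedged kernel-style sketch of that pattern (assumes <asm/io.h> and <linux/errno.h>; probe_isa_window is a made-up name for illustration):

/* Map an ISA-space window once, perform the accesses, then unmap.
 * The deleted helpers wrapped every single access in its own
 * ioremap()/iounmap() pair. */
static int probe_isa_window(unsigned long phys, unsigned long len)
{
	void __iomem *base = ioremap(phys, len);
	u8 id;

	if (!base)
		return -ENOMEM;

	id = readb(base);		/* first byte of the window */
	writeb(0, base + 1);		/* poke a neighbouring register */

	iounmap(base);
	return id;
}
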
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6f92482cc96c..0c017fc181c1 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -231,9 +231,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- mm->context[i] = 0;
+ for_each_online_cpu(i)
+ mm->context[i] = 0;
if (tsk != current)
task_thread_info(tsk)->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
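
for_each_online_cpu() walks only the bits set in the online CPU map instead of probing every index up to NR_CPUS. A rough sketch of the helper's shape (not the exact definition in <linux/cpumask.h>):

/* Sketch only: iterate the online map with the era's cpumask helpers. */
#define sketch_for_each_online_cpu(cpu)				\
	for ((cpu) = first_cpu(cpu_online_map);			\
	     (cpu) < NR_CPUS;					\
	     (cpu) = next_cpu((cpu), cpu_online_map))
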
diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h
index a011ef4cf3d3..192d80c875b0 100644
--- a/include/asm-alpha/mmzone.h
+++ b/include/asm-alpha/mmzone.h
@@ -59,9 +59,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define local_mapnr(kvaddr) \
- ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))
-
/*
* Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
* and returns the kaddr corresponding to first physical page in the
@@ -86,8 +83,7 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
pte_t pte; \
unsigned long pfn; \
\
- pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
- pfn += page_zone(page)->zone_start_pfn << 32; \
+ pfn = page_to_pfn(page) << 32; \
pte_val(pte) = pfn | pgprot_val(pgprot); \
\
pte; \
@@ -104,19 +100,8 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
__xx; \
})
-#define pfn_to_page(pfn) \
-({ \
- unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT); \
- (NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr)); \
-})
-
-#define page_to_pfn(page) \
- ((page) - page_zone(page)->zone_mem_map + \
- (page_zone(page)->zone_start_pfn))
-
#define page_to_pa(page) \
- ((( (page) - page_zone(page)->zone_mem_map ) \
- + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
+ (page_to_pfn(page) << PAGE_SHIFT)
#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
#define pfn_valid(pfn) \
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index fa0b41b164a7..61bcf70b5eac 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -85,8 +85,6 @@ typedef unsigned long pgprot_t;
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#ifndef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
@@ -95,9 +93,9 @@ typedef unsigned long pgprot_t;
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
#endif /* __KERNEL__ */
+#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
#endif /* _ALPHA_PAGE_H */
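
Both the mmzone.h and page.h hunks above drop the arch-private pfn/page converters in favour of <asm-generic/memory_model.h>. A conceptual sketch of the two mappings involved (not the generic header's exact macros; the helper names here are made up):

/* Flat case: one global mem_map array indexed directly by pfn. */
static inline struct page *flat_pfn_to_page(unsigned long pfn)
{
	return mem_map + pfn;
}

static inline unsigned long flat_page_to_pfn(struct page *page)
{
	return (unsigned long)(page - mem_map);
}

/* Discontiguous case: each node owns a slice of struct page entries
 * (node_mem_map) covering pfns that start at node_start_pfn. */
static inline struct page *node_pfn_to_page(unsigned long pfn)
{
	pg_data_t *pgdat = NODE_DATA(pfn_to_nid(pfn));
	return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
}

static inline unsigned long node_page_to_pfn(struct page *page)
{
	struct zone *zone = page_zone(page);
	return (page - zone->zone_mem_map) + zone->zone_start_pfn;
}
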
diff --git a/include/asm-alpha/poll.h b/include/asm-alpha/poll.h
index 34f333b762a0..76f89356b6a7 100644
--- a/include/asm-alpha/poll.h
+++ b/include/asm-alpha/poll.h
@@ -12,7 +12,9 @@
#define POLLWRNORM (1 << 8)
#define POLLWRBAND (1 << 9)
#define POLLMSG (1 << 10)
-#define POLLREMOVE (1 << 11)
+#define POLLREMOVE (1 << 12)
+#define POLLRDHUP (1 << 13)
+
struct pollfd {
int fd;
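
A small user-space sketch of the newly added POLLRDHUP flag, which reports the peer shutting down the writing side of a stream socket (glibc exposes it under _GNU_SOURCE):

#define _GNU_SOURCE		/* for POLLRDHUP in <poll.h> */
#include <poll.h>
#include <stdio.h>

/* Wait for data or a peer half-close on a connected stream socket;
 * wait_readable_or_hup is a made-up helper name for this sketch. */
static int wait_readable_or_hup(int sockfd, int timeout_ms)
{
	struct pollfd pfd = { .fd = sockfd, .events = POLLIN | POLLRDHUP };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return 0;			/* timeout or error */
	if (pfd.revents & POLLRDHUP)
		printf("peer closed its sending side\n");
	return pfd.revents & POLLIN;
}
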
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
index eb740e280d9c..420ccde6b916 100644
--- a/include/asm-alpha/topology.h
+++ b/include/asm-alpha/topology.h
@@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node)
cpumask_t node_cpu_mask = CPU_MASK_NONE;
int cpu;
- for(cpu = 0; cpu < NR_CPUS; cpu++) {
- if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
+ for_each_online_cpu(cpu) {
+ if (cpu_to_node(cpu) == node)
cpu_set(cpu, node_cpu_mask);
}
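
A hedged caller-side sketch of node_to_cpumask() as patched above, iterating the CPUs of one NUMA node (assumes the era's cpumask helpers from <linux/cpumask.h> and printk from <linux/kernel.h>; print_node_cpus is a made-up name):

/* Sketch: list the online CPUs that topology assigns to node `nid`. */
static void print_node_cpus(int nid)
{
	cpumask_t mask = node_to_cpumask(nid);
	int cpu;

	for_each_cpu_mask(cpu, mask)
		printk(KERN_DEBUG "node %d has cpu %d\n", nid, cpu);
}
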