author     Brian Gerst <brgerst@gmail.com>   2009-01-23 11:03:31 +0900
committer  Tejun Heo <tj@kernel.org>         2009-01-23 11:03:31 +0900
commit     658a9a2c34914e8114eea9c4d85d4ecd3ee33b98 (patch)
tree       aa5c9db51ee01db9f961ebe69a944cecb72d7945
parent     3819cd489ec5d18a4cbd2f05acdc516473caa105 (diff)
x86: sync hardirq_{32,64}.h
Impact: better code generation and removal of unused field for 32bit

In general, use the 64-bit version.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--  arch/x86/include/asm/hardirq_32.h | 14 ++++++++++----
-rw-r--r--  arch/x86/include/asm/hardirq_64.h | 10 +++++-----
2 files changed, 15 insertions, 9 deletions
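
For context, the "better code generation" the log message refers to comes from replacing the open-coded __get_cpu_var(irq_stat).member++ pattern with the percpu_add()/percpu_read() accessors the 64-bit header already uses, which x86 can emit as a single segment-prefixed memory operation instead of first materialising this CPU's irq_stat address. The stand-alone user-space sketch below only models that accessor pattern; it is not kernel code, and the __thread variable plus plain arithmetic are stand-ins for DECLARE_PER_CPU and the percpu_* operations from asm/percpu.h.

#include <stdio.h>

/* Cut-down model of irq_cpustat_t; only a few of the counters are kept. */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int irq_thermal_count;
	unsigned int irq_spurious_count;
	unsigned int irq_threshold_count;	/* the field this patch adds to the 32-bit struct */
} irq_cpustat_t;

/* Thread-local storage stands in for DECLARE_PER_CPU(irq_cpustat_t, irq_stat):
 * each thread ("CPU") reaches its own copy from a fixed base. */
static __thread irq_cpustat_t irq_stat;

/* Stand-ins for the accessors built on percpu_add()/percpu_read(); in the
 * kernel these become one %fs/%gs-relative instruction rather than
 * "compute the per-cpu address, then read-modify-write through it". */
#define inc_irq_stat(member)		(irq_stat.member += 1)
#define local_softirq_pending()		(irq_stat.__softirq_pending)

int main(void)
{
	inc_irq_stat(irq_thermal_count);
	inc_irq_stat(irq_threshold_count);
	printf("thermal=%u threshold=%u pending=%#x\n",
	       irq_stat.irq_thermal_count, irq_stat.irq_threshold_count,
	       local_softirq_pending());
	return 0;
}

The real accessors live in arch/x86/include/asm/percpu.h; the point of the patch is simply that the 32-bit header now uses them the same way the 64-bit one already did.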
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
index a70ed050fdef..e5a332c28c98 100644
--- a/arch/x86/include/asm/hardirq_32.h
+++ b/arch/x86/include/asm/hardirq_32.h
@@ -14,6 +14,7 @@ typedef struct {
 	unsigned int irq_tlb_count;
 	unsigned int irq_thermal_count;
 	unsigned int irq_spurious_count;
+	unsigned int irq_threshold_count;
 } ____cacheline_aligned irq_cpustat_t;
 
 DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
@@ -22,11 +23,16 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
 #define __ARCH_IRQ_STAT
-#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
-#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)
+#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
-void ack_bad_irq(unsigned int irq);
-#include <linux/irq_cpustat.h>
+#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
+
+extern void ack_bad_irq(unsigned int irq);
 #endif /* _ASM_X86_HARDIRQ_32_H */
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
index 873c3c7bffcc..392e7d614579 100644
--- a/arch/x86/include/asm/hardirq_64.h
+++ b/arch/x86/include/asm/hardirq_64.h
@@ -22,16 +22,16 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
-#define __ARCH_IRQ_STAT 1
+#define __ARCH_IRQ_STAT
 #define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
-#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
+#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
-#define __ARCH_SET_SOFTIRQ_PENDING 1
+#define __ARCH_SET_SOFTIRQ_PENDING
-#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
-#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
 extern void ack_bad_irq(unsigned int irq);
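
Defining __ARCH_SET_SOFTIRQ_PENDING, now done identically in both headers, tells the generic softirq code to route its pending-mask updates through the architecture's set_softirq_pending()/or_softirq_pending() instead of its default read-modify-write on the per-cpu mask. The user-space sketch below is a rough paraphrase of how those hooks are consumed, loosely modeled on kernel/softirq.c's raise/handle paths; the *_model() helpers and the __thread variable are stand-ins, not kernel code.

#include <stdio.h>

/* Per-"CPU" pending mask; __thread stands in for the per-cpu irq_stat field. */
static __thread unsigned int __softirq_pending;

/* Stand-ins for the three arch hooks this patch makes common to 32/64-bit. */
#define local_softirq_pending()	(__softirq_pending)
#define set_softirq_pending(x)	(__softirq_pending = (x))
#define or_softirq_pending(x)	(__softirq_pending |= (x))

/* Loosely models raising a softirq: mark softirq nr pending on this CPU. */
static void raise_softirq_model(unsigned int nr)
{
	or_softirq_pending(1U << nr);
}

/* Loosely models the core loop of softirq handling: snapshot the mask,
 * clear it, then service every bit that was set. */
static void do_softirq_model(void)
{
	unsigned int pending = local_softirq_pending();

	set_softirq_pending(0);
	while (pending) {
		printf("running softirq %d\n", __builtin_ctz(pending));
		pending &= pending - 1;		/* clear lowest set bit */
	}
}

int main(void)
{
	raise_softirq_model(0);
	raise_softirq_model(3);
	do_softirq_model();
	return 0;
}

On x86 the real set_softirq_pending()/or_softirq_pending() expand to percpu_write()/percpu_or() as shown in the diff, so both raising and clearing the mask are single per-cpu memory operations.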