Diffstat (limited to 'arch')
-rw-r--r--arch/ia64/kernel/irq_ia64.c27
-rw-r--r--arch/ia64/kernel/smp.c68
-rw-r--r--arch/ia64/sn/kernel/sn2/sn2_smp.c67
3 files changed, 152 insertions, 10 deletions
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 456f57b087ca..9a5f41be760b 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -39,6 +39,7 @@
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>
+#include <asm/tlbflush.h>
#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
@@ -127,8 +128,10 @@ void destroy_irq(unsigned int irq)
#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
+# define IS_LOCAL_TLB_FLUSH(vec) (vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
# define IS_RESCHEDULE(vec) (0)
+# define IS_LOCAL_TLB_FLUSH(vec) (0)
#endif
/*
* That's where the IVT branches when we get an external
@@ -180,8 +183,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
- if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
+ if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+ smp_local_flush_tlb();
+ kstat_this_cpu.irqs[vector]++;
+ } else if (unlikely(IS_RESCHEDULE(vector)))
+ kstat_this_cpu.irqs[vector]++;
else {
ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
@@ -227,8 +233,11 @@ void ia64_process_pending_intr(void)
* Perform normal interrupt style processing
*/
while (vector != IA64_SPURIOUS_INT_VECTOR) {
- if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
+ if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+ smp_local_flush_tlb();
+ kstat_this_cpu.irqs[vector]++;
+ } else if (unlikely(IS_RESCHEDULE(vector)))
+ kstat_this_cpu.irqs[vector]++;
else {
struct pt_regs *old_regs = set_irq_regs(NULL);
@@ -260,12 +269,12 @@ void ia64_process_pending_intr(void)
#ifdef CONFIG_SMP
-extern irqreturn_t handle_IPI (int irq, void *dev_id);
static irqreturn_t dummy_handler (int irq, void *dev_id)
{
BUG();
}
+extern irqreturn_t handle_IPI (int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
.handler = handle_IPI,
@@ -278,6 +287,13 @@ static struct irqaction resched_irqaction = {
.flags = IRQF_DISABLED,
.name = "resched"
};
+
+static struct irqaction tlb_irqaction = {
+ .handler = dummy_handler,
+ .flags = IRQF_DISABLED,
+ .name = "tlb_flush"
+};
+
#endif
void
@@ -303,6 +319,7 @@ init_IRQ (void)
#ifdef CONFIG_SMP
register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
+ register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
#ifdef CONFIG_PERFMON
pfm_init_percpu();
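
The irq_ia64.c changes above wire the new IA64_IPI_LOCAL_TLB_FLUSH vector straight into the low-level vector loop: like the reschedule IPI, it is recognized by vector number and handled inline (a call to smp_local_flush_tlb() plus a statistics bump), so it never reaches the generic per-IRQ path; that is also why the registered handler is dummy_handler, which would BUG() if it were ever invoked. Below is a minimal standalone C sketch of that dispatch pattern; the vector numbers, function names and statistics array are invented for illustration and are not the kernel's.

/*
 * Standalone sketch (not kernel code) of the inline-dispatch pattern used in
 * ia64_handle_irq()/ia64_process_pending_intr(): "cheap" IPI vectors are
 * recognized directly in the vector loop, everything else falls through to
 * the generic handler. All names and vector values here are illustrative.
 */
#include <stdio.h>

#define SPURIOUS_VECTOR    15      /* loop terminator, like IA64_SPURIOUS_INT_VECTOR */
#define RESCHEDULE_VECTOR  0xfd    /* stands in for IA64_IPI_RESCHEDULE */
#define TLB_FLUSH_VECTOR   0xfc    /* stands in for IA64_IPI_LOCAL_TLB_FLUSH */

static unsigned long irq_stats[256];            /* kstat_this_cpu.irqs[] analogue */

static void local_flush_tlb_all(void)           /* stand-in for the real TLB flush */
{
	puts("  -> flushed local TLB");
}

static void generic_irq_handler(int vector)     /* stand-in for the per-IRQ path */
{
	printf("  -> generic handling of vector 0x%x\n", vector);
}

static void handle_vector(int vector)
{
	if (vector == TLB_FLUSH_VECTOR) {       /* IS_LOCAL_TLB_FLUSH() */
		local_flush_tlb_all();
		irq_stats[vector]++;
	} else if (vector == RESCHEDULE_VECTOR) /* IS_RESCHEDULE() */
		irq_stats[vector]++;
	else                                    /* normal interrupt processing */
		generic_irq_handler(vector);
}

int main(void)
{
	int pending[] = { 0x30, TLB_FLUSH_VECTOR, RESCHEDULE_VECTOR, SPURIOUS_VECTOR };

	for (int i = 0; pending[i] != SPURIOUS_VECTOR; i++) {
		printf("vector 0x%x\n", pending[i]);
		handle_vector(pending[i]);
	}
	return 0;
}
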
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 55ddd809b02d..221de3804560 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -50,6 +50,18 @@
#include <asm/mca.h>
/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
+ */
+static struct local_tlb_flush_counts {
+ unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
+
+/*
* Structure and data for smp_call_function(). This is designed to minimise static memory
* requirements. It also looks cleaner.
*/
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+ platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+ /*
+ * Use atomic ops. Otherwise, the load/increment/store sequence from
+ * a "++" operation can have the line stolen between the load & store.
+ * The overhead of the atomic op is negligible in this case, and the
+ * atomicity offers a significant benefit during the brief periods when
+ * lots of cpus are simultaneously flushing TLBs.
+ */
+ ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+ local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+ unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+ cpumask_t cpumask = xcpumask;
+ int mycpu, cpu, flush_mycpu = 0;
+
+ preempt_disable();
+ mycpu = smp_processor_id();
+
+ for_each_cpu_mask(cpu, cpumask)
+ counts[cpu] = local_tlb_flush_counts[cpu].count;
+
+ mb();
+ for_each_cpu_mask(cpu, cpumask) {
+ if (cpu == mycpu)
+ flush_mycpu = 1;
+ else
+ smp_send_local_flush_tlb(cpu);
+ }
+
+ if (flush_mycpu)
+ smp_local_flush_tlb();
+
+ for_each_cpu_mask(cpu, cpumask)
+ while(counts[cpu] == local_tlb_flush_counts[cpu].count)
+ udelay(FLUSH_DELAY);
+
+ preempt_enable();
+}
+
void
smp_flush_tlb_all (void)
{
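
The new smp_flush_tlb_cpumask() is a completion handshake rather than a cross-call: the initiator snapshots each target cpu's flush counter, sends the lightweight IPIs (flushing locally if it is itself in the mask), then spins with a small udelay() backoff until every counter has advanced past its snapshot, which proves the remote flush has run. The userspace sketch below models the same handshake with pthreads and C11 atomics, using a request flag where the kernel uses an IPI; the thread setup, names and 4-cpu mask are illustrative only.

/*
 * Userspace sketch (pthreads, not kernel code) of the handshake implemented
 * by smp_flush_tlb_cpumask(): snapshot per-cpu flush counters, signal the
 * targets, and wait until every counter moves past its snapshot.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_uint flush_count[NCPUS];   /* local_tlb_flush_counts[] analogue */
static atomic_int  flush_request[NCPUS]; /* stands in for the IPI */
static atomic_int  stop;

static void local_flush_tlb(int cpu)
{
	/* The real code flushes the TLB, then bumps the counter with fetchadd. */
	atomic_fetch_add(&flush_count[cpu], 1);
}

static void *cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;

	while (!atomic_load(&stop)) {
		if (atomic_exchange(&flush_request[cpu], 0))
			local_flush_tlb(cpu);
	}
	return NULL;
}

static void flush_tlb_cpumask(unsigned mask)
{
	unsigned int snap[NCPUS];

	for (int cpu = 0; cpu < NCPUS; cpu++)           /* snapshot the counters */
		if (mask & (1u << cpu))
			snap[cpu] = atomic_load(&flush_count[cpu]);

	for (int cpu = 0; cpu < NCPUS; cpu++)           /* "send the IPIs" */
		if (mask & (1u << cpu))
			atomic_store(&flush_request[cpu], 1);

	for (int cpu = 0; cpu < NCPUS; cpu++)           /* wait for completion */
		if (mask & (1u << cpu))
			while (atomic_load(&flush_count[cpu]) == snap[cpu])
				usleep(5);              /* FLUSH_DELAY-style backoff */
}

int main(void)
{
	pthread_t tid[NCPUS];

	for (long cpu = 0; cpu < NCPUS; cpu++)
		pthread_create(&tid[cpu], NULL, cpu_thread, (void *)cpu);

	flush_tlb_cpumask(0xf);
	puts("all requested CPUs have flushed");

	atomic_store(&stop, 1);
	for (int cpu = 0; cpu < NCPUS; cpu++)
		pthread_join(tid[cpu], NULL);
	return 0;
}
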
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 601747b1e22a..5d318b579fb1 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -46,6 +46,9 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);
static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
+/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
+static int sn2_flush_opt = 0;
+
extern unsigned long
sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long,
@@ -76,6 +79,8 @@ struct ptc_stats {
unsigned long shub_itc_clocks;
unsigned long shub_itc_clocks_max;
unsigned long shub_ptc_flushes_not_my_mm;
+ unsigned long shub_ipi_flushes;
+ unsigned long shub_ipi_flushes_itc_clocks;
};
#define sn2_ptctest 0
@@ -121,6 +126,18 @@ void sn_tlb_migrate_finish(struct mm_struct *mm)
flush_tlb_mm(mm);
}
+static void
+sn2_ipi_flush_all_tlb(struct mm_struct *mm)
+{
+ unsigned long itc;
+
+ itc = ia64_get_itc();
+ smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+ itc = ia64_get_itc() - itc;
+ __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
+ __get_cpu_var(ptcstats).shub_ipi_flushes++;
+}
+
/**
* sn2_global_tlb_purge - globally purge translation cache of virtual address range
* @mm: mm_struct containing virtual address range
@@ -154,7 +171,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
short nasids[MAX_NUMNODES], nix;
nodemask_t nodes_flushed;
- int active, max_active, deadlock;
+ int active, max_active, deadlock, flush_opt = sn2_flush_opt;
+
+ if (flush_opt > 2) {
+ sn2_ipi_flush_all_tlb(mm);
+ return;
+ }
nodes_clear(nodes_flushed);
i = 0;
@@ -189,6 +211,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
return;
}
+ if (flush_opt == 2) {
+ sn2_ipi_flush_all_tlb(mm);
+ preempt_enable();
+ return;
+ }
+
itc = ia64_get_itc();
nix = 0;
for_each_node_mask(cnode, nodes_flushed)
@@ -256,6 +284,8 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
}
if (active >= max_active || i == (nix - 1)) {
if ((deadlock = wait_piowc())) {
+ if (flush_opt == 1)
+ goto done;
sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
if (reset_max_active_on_deadlock())
max_active = 1;
@@ -267,6 +297,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
start += (1UL << nbits);
} while (start < end);
+done:
itc2 = ia64_get_itc() - itc2;
__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
@@ -279,6 +310,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
+ if (flush_opt == 1 && deadlock) {
+ __get_cpu_var(ptcstats).deadlocks++;
+ sn2_ipi_flush_all_tlb(mm);
+ }
+
preempt_enable();
}
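
Taken together, the sn2_flush_opt checks added to sn2_global_tlb_purge() give three escalation points: values above 2 bypass everything and always flush via IPIs, 2 keeps the unchanged single-cpu fast path but replaces the SHUB PTC broadcast with IPIs, and 1 only falls back to an IPI flush after wait_piowc() reports a PIO deadlock. The standalone sketch below merely restates that ordering; the helper names are invented and the real deadlock-recovery details are omitted.

/*
 * Illustrative-only summary of where the sn2_flush_opt checks sit inside
 * sn2_global_tlb_purge(). All helper functions here are invented stubs.
 */
#include <stdio.h>

enum { PTC_ONLY = 0, IPI_ON_DEADLOCK = 1, IPI_NOT_PTC = 2 };

static void ipi_flush_all(void)          { puts("  IPI flush of mm->cpu_vm_mask"); }
static void local_flush(void)            { puts("  purely local flush"); }
static int  shub_ptc_flush(void)         { puts("  SHUB ptc broadcast"); return 1; /* pretend deadlock */ }
static int  only_local_cpu_uses_mm(void) { return 0; }

static void purge(int flush_opt)
{
	printf("flush_opt = %d\n", flush_opt);

	if (flush_opt > IPI_NOT_PTC) {                  /* ">2 = always ipi" */
		ipi_flush_all();
		return;
	}
	if (only_local_cpu_uses_mm()) {                 /* unchanged fast path */
		local_flush();
		return;
	}
	if (flush_opt == IPI_NOT_PTC) {                 /* "2 = ipi instead of SHUB ptc" */
		ipi_flush_all();
		return;
	}
	if (shub_ptc_flush() && flush_opt == IPI_ON_DEADLOCK)
		ipi_flush_all();                        /* "1 = ipi deadlock flush" */
}

int main(void)
{
	for (int opt = 0; opt <= 3; opt++)
		purge(opt);
	return 0;
}
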
@@ -425,24 +461,44 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
if (!cpu) {
seq_printf(file,
- "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
- seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+ "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n");
+ seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
}
if (cpu < NR_CPUS && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
- seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+ seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
- stat->deadlocks2);
+ stat->deadlocks2,
+ stat->shub_ipi_flushes,
+ 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
}
return 0;
}
+static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
+{
+ int cpu;
+ char optstr[64];
+
+ if (count == 0 || count > sizeof(optstr))
+ return -EINVAL;
+ if (copy_from_user(optstr, user, count))
+ return -EFAULT;
+ optstr[count - 1] = '\0';
+ sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
+
+ for_each_online_cpu(cpu)
+ memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
+
+ return count;
+}
+
static struct seq_operations sn2_ptc_seq_ops = {
.start = sn2_ptc_seq_start,
.next = sn2_ptc_seq_next,
@@ -458,6 +512,7 @@ static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
static const struct file_operations proc_sn2_ptc_operations = {
.open = sn2_ptc_proc_open,
.read = seq_read,
+ .write = sn2_ptc_proc_write,
.llseek = seq_lseek,
.release = seq_release,
};
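
Finally, sn2_ptc_proc_write() makes the flush mode tunable at runtime: writing a number to the ptc statistics proc file sets sn2_flush_opt and zeroes every cpu's ptc_stats, and the two new columns (ipi_flushes, ipi_nsec) appear at the end of each stats line on the next read. A small userspace sketch follows; the proc path is an assumption (the entry is registered elsewhere in sn2_smp.c and is not part of this diff), so treat it as illustrative.

/*
 * Userspace sketch: select the IPI-based flush mode and re-read the stats.
 * PTC_PROC_PATH is an assumed location, not confirmed by this diff.
 */
#include <stdio.h>

#define PTC_PROC_PATH "/proc/sgi_sn/ptc_statistics"   /* assumed path */

int main(void)
{
	char line[512];
	FILE *f;

	f = fopen(PTC_PROC_PATH, "w");
	if (!f) {
		perror(PTC_PROC_PATH);
		return 1;
	}
	fputs("2\n", f);        /* 2 = use IPIs instead of the SHUB ptc; also resets the counters */
	fclose(f);

	f = fopen(PTC_PROC_PATH, "r");
	if (!f) {
		perror(PTC_PROC_PATH);
		return 1;
	}
	while (fgets(line, sizeof(line), f))   /* header now ends in ... ipi_flushes ipi_nsec */
		fputs(line, stdout);
	fclose(f);
	return 0;
}
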