-rw-r--r--  include/linux/clockchips.h      5
-rw-r--r--  kernel/time/tick-broadcast.c   39
2 files changed, 36 insertions(+), 8 deletions(-)
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 66346521cb65..494d33ea78f8 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -55,6 +55,11 @@ enum clock_event_nofitiers {
#define CLOCK_EVT_FEAT_C3STOP 0x000008
#define CLOCK_EVT_FEAT_DUMMY 0x000010
+/*
+ * Core shall set the interrupt affinity dynamically in broadcast mode
+ */
+#define CLOCK_EVT_FEAT_DYNIRQ 0x000020
+
/**
* struct clock_event_device - clock event device descriptor
* @event_handler: Assigned by the framework to be called by the low
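
The new flag only matters for drivers whose broadcast timer interrupt can actually be retargeted at runtime. A minimal driver-side sketch of how such a device might advertise it is shown below; the device name, rating and IRQ number are hypothetical and are not part of this patch.

/*
 * Hypothetical driver-side sketch (not from this patch): a broadcast
 * timer whose interrupt can be re-routed sets CLOCK_EVT_FEAT_DYNIRQ and
 * fills in ->irq so the core may call irq_set_affinity() on it.
 */
static struct clock_event_device soc_broadcast_timer = {
	.name		= "soc-broadcast-timer",	/* hypothetical name */
	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ,
	.rating		= 300,
	.irq		= 42,				/* hypothetical IRQ number */
	/* .set_mode, .set_next_event, .cpumask set up as for any clockevent */
};
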
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 70dd98ce18d7..380910db7157 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -401,13 +401,34 @@ struct cpumask *tick_get_broadcast_oneshot_mask(void)
return tick_broadcast_oneshot_mask;
}
-static int tick_broadcast_set_event(struct clock_event_device *bc,
+/*
+ * Set broadcast interrupt affinity
+ */
+static void tick_broadcast_set_affinity(struct clock_event_device *bc,
+ const struct cpumask *cpumask)
+{
+ if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
+ return;
+
+ if (cpumask_equal(bc->cpumask, cpumask))
+ return;
+
+ bc->cpumask = cpumask;
+ irq_set_affinity(bc->irq, bc->cpumask);
+}
+
+static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
ktime_t expires, int force)
{
+ int ret;
+
if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
- return clockevents_program_event(bc, expires, force);
+ ret = clockevents_program_event(bc, expires, force);
+ if (!ret)
+ tick_broadcast_set_affinity(bc, cpumask_of(cpu));
+ return ret;
}
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -436,7 +457,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
ktime_t now, next_event;
- int cpu;
+ int cpu, next_cpu = 0;
raw_spin_lock(&tick_broadcast_lock);
again:
@@ -447,10 +468,12 @@ again:
/* Find all expired events */
for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
td = &per_cpu(tick_cpu_device, cpu);
- if (td->evtdev->next_event.tv64 <= now.tv64)
+ if (td->evtdev->next_event.tv64 <= now.tv64) {
cpumask_set_cpu(cpu, tmpmask);
- else if (td->evtdev->next_event.tv64 < next_event.tv64)
+ } else if (td->evtdev->next_event.tv64 < next_event.tv64) {
next_event.tv64 = td->evtdev->next_event.tv64;
+ next_cpu = cpu;
+ }
}
/*
@@ -473,7 +496,7 @@ again:
* Rearm the broadcast device. If event expired,
* repeat the above
*/
- if (tick_broadcast_set_event(dev, next_event, 0))
+ if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
goto again;
}
raw_spin_unlock(&tick_broadcast_lock);
@@ -515,7 +538,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
if (dev->next_event.tv64 < bc->next_event.tv64)
- tick_broadcast_set_event(bc, dev->next_event, 1);
+ tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
}
} else {
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
@@ -581,7 +604,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
tick_broadcast_init_next_event(tmpmask,
tick_next_period);
- tick_broadcast_set_event(bc, tick_next_period, 1);
+ tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
} else
bc->next_event.tv64 = KTIME_MAX;
} else {
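
Taken together, the tick-broadcast.c changes make the broadcast interrupt follow the CPU that owns the earliest pending event whenever the device advertises CLOCK_EVT_FEAT_DYNIRQ. A simplified, self-contained restatement of that rearm path (illustration only, with a hypothetical helper name, not code from the patch) looks like this:

/*
 * Illustration only: rearm the broadcast device for the earliest expiry
 * and, if the device supports dynamic irq affinity, steer its interrupt
 * to the CPU that owns that expiry so the wakeup is delivered locally
 * instead of via an extra IPI.
 */
static int rearm_broadcast_for(struct clock_event_device *bc, int cpu,
			       ktime_t expires)
{
	int ret = clockevents_program_event(bc, expires, 0);

	if (ret)
		return ret;	/* programming failed, caller retries */

	if ((bc->features & CLOCK_EVT_FEAT_DYNIRQ) &&
	    !cpumask_equal(bc->cpumask, cpumask_of(cpu))) {
		bc->cpumask = cpumask_of(cpu);
		irq_set_affinity(bc->irq, bc->cpumask);
	}
	return 0;
}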