author		Peter Zijlstra <peterz@infradead.org>	2015-03-31 20:49:00 +0530
committer	Ingo Molnar <mingo@kernel.org>		2015-04-02 17:46:00 +0200
commit		b337a9380f7effd60d082569dd7e0b97a7549730
tree		2cf1f6d3e69a57782d3d0ed71533aace12e00150
parent		345527b1edce8df719e0884500c76832a18211c3
timer: Allocate per-cpu tvec_base's statically
Memory for the 'tvec_base' array is allocated separately for the boot CPU
(statically) and non-boot CPUs (dynamically).

The reason is because __TIMER_INITIALIZER() needs to set ->base to a valid
pointer (because we've made NULL special, hint: lock_timer_base()) and we
cannot get a compile time pointer to per-cpu entries because we don't know
where we'll map the section, even for the boot cpu.

This can be simplified a bit by statically allocating per-cpu memory. The
only disadvantage is that memory for one of the structures will stay unused,
i.e. for the boot CPU, which uses boot_tvec_bases.

This will also guarantee that tvec_base is cacheline aligned. Even though
tvec_base has ____cacheline_aligned stuck on, kzalloc_node() does not
actually respect that (but guarantees a minimum u64 alignment).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/17cdf560f2727f687ab159707d0aa591f8a2f82d.1427814611.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/time/timer.c	48
1 file changed, 19 insertions(+), 29 deletions(-)
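The alignment point above can be demonstrated outside the kernel. The following
standalone C program is illustrative only and not part of the patch; all names
in it (fake_tvec_base, static_bases, the NR_CPUS/CACHELINE constants) are made
up for the demo. It mimics ____cacheline_aligned with a 64-byte aligned
attribute and compares a statically allocated per-CPU array, whose elements
honour the declared alignment by construction, with malloc()'d storage, which,
like kzalloc_node(), only promises a minimum alignment for the pointer it
returns:

/*
 * Illustrative userspace sketch (not kernel code): compare the alignment of
 * elements in a statically allocated, explicitly aligned array against heap
 * allocations that only honour malloc()'s minimum alignment.
 * Build with: cc -std=c11 -O2 align_demo.c
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS		4
#define CACHELINE	64

/* Stand-in for a ____cacheline_aligned kernel struct. */
struct fake_tvec_base {
	unsigned long timer_jiffies;
	unsigned long next_timer;
} __attribute__((aligned(CACHELINE)));

/* "Static per-cpu" storage: every element is cacheline aligned by construction. */
static struct fake_tvec_base static_bases[NR_CPUS];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* Analogue of the old kzalloc_node() path. */
		struct fake_tvec_base *dyn = malloc(sizeof(*dyn));

		if (!dyn)
			return 1;
		printf("cpu%d: static %% 64 = %2zu, malloc %% 64 = %2zu\n",
		       cpu,
		       (size_t)((uintptr_t)&static_bases[cpu] % CACHELINE),
		       (size_t)((uintptr_t)dyn % CACHELINE));
		free(dyn);
	}
	return 0;
}

On a typical glibc system the malloc column usually prints offsets such as 0,
16, 32 or 48, while the static column is always 0; the patch below obtains the
same guarantee in the kernel by moving the per-cpu tvec_base storage into a
statically defined per-cpu area.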
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 2d3f5c504939..f3cc653f876c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -90,8 +90,19 @@ struct tvec_base {
 	struct tvec tv5;
 } ____cacheline_aligned;
 
+/*
+ * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
+ * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
+ * pointer to per-cpu entries because we don't know where we'll map the section,
+ * even for the boot cpu.
+ *
+ * And so we use boot_tvec_bases for boot CPU and per-cpu __tvec_bases for the
+ * rest of them.
+ */
 struct tvec_base boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
+
 static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
 
 /* Functions below help us manage 'deferrable' flag */
@@ -1534,46 +1545,25 @@ EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
 static int init_timers_cpu(int cpu)
 {
-	int j;
-	struct tvec_base *base;
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
 	static char tvec_base_done[NR_CPUS];
+	int j;
 
 	if (!tvec_base_done[cpu]) {
-		static char boot_done;
+		static char boot_cpu_skipped;
 
-		if (boot_done) {
-			/*
-			 * The APs use this path later in boot
-			 */
-			base = kzalloc_node(sizeof(*base), GFP_KERNEL,
-					    cpu_to_node(cpu));
-			if (!base)
-				return -ENOMEM;
-
-			/* Make sure tvec_base has TIMER_FLAG_MASK bits free */
-			if (WARN_ON(base != tbase_get_base(base))) {
-				kfree(base);
-				return -ENOMEM;
-			}
-			per_cpu(tvec_bases, cpu) = base;
+		if (!boot_cpu_skipped) {
+			boot_cpu_skipped = 1; /* skip the boot cpu */
 		} else {
-			/*
-			 * This is for the boot CPU - we use compile-time
-			 * static initialisation because per-cpu memory isn't
-			 * ready yet and because the memory allocators are not
-			 * initialised either.
-			 */
-			boot_done = 1;
-			base = &boot_tvec_bases;
+			base = per_cpu_ptr(&__tvec_bases, cpu);
+			per_cpu(tvec_bases, cpu) = base;
 		}
+
 		spin_lock_init(&base->lock);
 		tvec_base_done[cpu] = 1;
 		base->cpu = cpu;
-	} else {
-		base = per_cpu(tvec_bases, cpu);
 	}
-
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
 		INIT_LIST_HEAD(base->tv4.vec + j);