Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/Makefile        |   2
-rw-r--r--  kernel/time/jiffies.c       |   8
-rw-r--r--  kernel/time/tick-common.c   |   8
-rw-r--r--  kernel/time/tick-internal.h |   1
-rw-r--r--  kernel/time/tick-sched.c    | 137
-rw-r--r--  kernel/time/timecompare.c   | 193
-rw-r--r--  kernel/time/timekeeping.c   |  14
7 files changed, 76 insertions(+), 287 deletions(-)
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index e2fd74b8e8c2..ff7d9d2ab504 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
-obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
+obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
obj-y += timeconv.o posix-clock.o alarmtimer.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 6629bf7b5285..7a925ba456fb 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -58,7 +58,7 @@ static cycle_t jiffies_read(struct clocksource *cs)
return (cycle_t) jiffies;
}
-struct clocksource clocksource_jiffies = {
+static struct clocksource clocksource_jiffies = {
.name = "jiffies",
.rating = 1, /* lowest valid rating*/
.read = jiffies_read,
@@ -67,6 +67,8 @@ struct clocksource clocksource_jiffies = {
.shift = JIFFIES_SHIFT,
};
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
@@ -74,9 +76,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
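The hunk above preserves the classic seqlock read side: on 32-bit kernels the 64-bit jiffies_64 load is not atomic, so the reader loops until the sequence count proves no writer ran mid-read. A minimal sketch of the same pattern, assuming this era's <linux/seqlock.h> API; sample_lock and sample_value are illustrative names, not kernel symbols:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(sample_lock);     /* plays the role of jiffies_lock */
static u64 sample_value;                /* hypothetical 64-bit shared state */

static u64 sample_read(void)
{
        unsigned seq;
        u64 ret;

        do {
                /* snapshot the writer's sequence count */
                seq = read_seqbegin(&sample_lock);
                ret = sample_value;
                /* retry if a writer ran while we were reading */
        } while (read_seqretry(&sample_lock, seq));

        return ret;
}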
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ecad4e4..b1600a6973f4 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
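tick_periodic() above is the matching write side of that contract: only the CPU holding the tick_do_timer duty advances jiffies, and it does so under write_seqlock(), which bumps the sequence count and forces concurrent readers such as get_jiffies_64() to retry. A sketch of the writer half, reusing the illustrative names from the reader sketch above:

static void sample_advance(u64 delta)
{
        /* exclusive: serializes writers and invalidates in-flight readers */
        write_seqlock(&sample_lock);
        sample_value += delta;
        write_sequnlock(&sample_lock);
}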
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b901fed..cf3e59ed6dc0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,3 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a40260885265..d58e552d9fd1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
/*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time when the last jiffy update happened. Protected by jiffies_lock.
*/
static ktime_t last_jiffies_update;
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
ktime_t delta;
/*
- * Do a quick check without holding xtime_lock:
+ * Do a quick check without holding jiffies_lock:
*/
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 < tick_period.tv64)
return;
- /* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ /* Reevaluate with jiffies_lock held */
+ write_seqlock(&jiffies_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
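tick_do_update_jiffies64() above is a standard double-checked update: a lock-free read of last_jiffies_update filters out the common "less than one tick elapsed" case cheaply, and the condition is re-evaluated after taking jiffies_lock because another CPU may have updated the stamp in the window. A condensed sketch of that shape, under assumed names (state_lock, do_update):

#include <linux/ktime.h>
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(state_lock);
static ktime_t last_update;             /* protected by state_lock */
static ktime_t period;

static void do_update(ktime_t now);     /* hypothetical helper */

static void maybe_update(ktime_t now)
{
        /* racy fast path: the slow path below re-checks under the lock */
        if (ktime_sub(now, last_update).tv64 < period.tv64)
                return;

        write_seqlock(&state_lock);
        /* re-check under the lock: another CPU may have won the race */
        if (ktime_sub(now, last_update).tv64 >= period.tv64)
                do_update(now);
        write_sequnlock(&state_lock);
}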
/*
@@ -89,15 +89,58 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
return period;
}
+
+static void tick_sched_do_timer(ktime_t now)
+{
+ int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+ /*
+ * Check if the do_timer duty was dropped. We don't care about
+ * concurrency: This happens only when the cpu in charge went
+ * into a long sleep. If two cpus happen to assign themselves to
+ * this duty, then the jiffies update is still serialized by
+ * jiffies_lock.
+ */
+ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+ tick_do_timer_cpu = cpu;
+#endif
+
+ /* Check if the jiffies need an update */
+ if (tick_do_timer_cpu == cpu)
+ tick_do_update_jiffies64(now);
+}
+
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+#ifdef CONFIG_NO_HZ
+ /*
+ * When we are idle and the tick is stopped, we have to touch
+ * the watchdog as we might not schedule for a really long
+ * time. This happens on complete idle SMP systems while
+ * waiting on the login prompt. We also increment the "start of
+ * idle" jiffy stamp so the idle accounting adjustment we do
+ * when we go busy again does not account for too many ticks.
+ */
+ if (ts->tick_stopped) {
+ touch_softlockup_watchdog();
+ if (is_idle_task(current))
+ ts->idle_jiffies++;
+ }
+#endif
+ update_process_times(user_mode(regs));
+ profile_tick(CPU_PROFILING);
+}
+
/*
* NOHZ - aka dynamic tick functionality
*/
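Net effect of this hunk: the previously duplicated handler bodies are split along their natural seam, with tick_sched_do_timer() covering the global jiffies duty and tick_sched_handle() the per-cpu accounting. As the hunks below show, both the low-res and high-res tick handlers reduce to the same two-call shape:

        tick_sched_do_timer(now);
        tick_sched_handle(ts, regs);    /* the hrtimer path guards this with if (regs) */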
@@ -282,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
@@ -526,6 +569,8 @@ void tick_nohz_irq_exit(void)
if (!ts->inidle)
return;
+ /* Cancel the timer because the CPU has already woken up from the C-states */
+ menu_hrtimer_cancel();
__tick_nohz_idle_enter(ts);
}
@@ -621,6 +666,8 @@ void tick_nohz_idle_exit(void)
ts->inidle = 0;
+ /* Cancel the timer because the CPU has already woken up from the C-states */
+ menu_hrtimer_cancel();
if (ts->idle_active || ts->tick_stopped)
now = ktime_get();
@@ -648,40 +695,12 @@ static void tick_nohz_handler(struct clock_event_device *dev)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
struct pt_regs *regs = get_irq_regs();
- int cpu = smp_processor_id();
ktime_t now = ktime_get();
dev->next_event.tv64 = KTIME_MAX;
- /*
- * Check if the do_timer duty was dropped. We don't care about
- * concurrency: This happens only when the cpu in charge went
- * into a long sleep. If two cpus happen to assign themself to
- * this duty, then the jiffies update is still serialized by
- * xtime_lock.
- */
- if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
- tick_do_timer_cpu = cpu;
-
- /* Check, if the jiffies need an update */
- if (tick_do_timer_cpu == cpu)
- tick_do_update_jiffies64(now);
-
- /*
- * When we are idle and the tick is stopped, we have to touch
- * the watchdog as we might not schedule for a really long
- * time. This happens on complete idle SMP systems while
- * waiting on the login prompt. We also increment the "start
- * of idle" jiffy stamp so the idle accounting adjustment we
- * do when we go busy again does not account too much ticks.
- */
- if (ts->tick_stopped) {
- touch_softlockup_watchdog();
- ts->idle_jiffies++;
- }
-
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING);
+ tick_sched_do_timer(now);
+ tick_sched_handle(ts, regs);
while (tick_nohz_reprogram(ts, now)) {
now = ktime_get();
@@ -794,7 +813,7 @@ void tick_check_idle(int cpu)
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* We rearm the timer until we get disabled by the idle code.
- * Called with interrupts disabled and timer->base->cpu_base->lock held.
+ * Called with interrupts disabled.
*/
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
@@ -802,45 +821,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
container_of(timer, struct tick_sched, sched_timer);
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
- int cpu = smp_processor_id();
-#ifdef CONFIG_NO_HZ
- /*
- * Check if the do_timer duty was dropped. We don't care about
- * concurrency: This happens only when the cpu in charge went
- * into a long sleep. If two cpus happen to assign themself to
- * this duty, then the jiffies update is still serialized by
- * xtime_lock.
- */
- if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
- tick_do_timer_cpu = cpu;
-#endif
-
- /* Check, if the jiffies need an update */
- if (tick_do_timer_cpu == cpu)
- tick_do_update_jiffies64(now);
+ tick_sched_do_timer(now);
/*
* Do not call, when we are not in irq context and have
* no valid regs pointer
*/
- if (regs) {
- /*
- * When we are idle and the tick is stopped, we have to touch
- * the watchdog as we might not schedule for a really long
- * time. This happens on complete idle SMP systems while
- * waiting on the login prompt. We also increment the "start of
- * idle" jiffy stamp so the idle accounting adjustment we do
- * when we go busy again does not account too much ticks.
- */
- if (ts->tick_stopped) {
- touch_softlockup_watchdog();
- if (is_idle_task(current))
- ts->idle_jiffies++;
- }
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING);
- }
+ if (regs)
+ tick_sched_handle(ts, regs);
hrtimer_forward(timer, now, tick_period);
@@ -874,7 +863,7 @@ void tick_setup_sched_timer(void)
/* Get the next period (per cpu) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
- /* Offset the tick to avert xtime_lock contention. */
+ /* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
u64 offset = ktime_to_ns(tick_period) >> 1;
do_div(offset, num_possible_cpus());
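The sched_skew_tick logic in the final hunk staggers each CPU's tick so that all CPUs do not pile onto jiffies_lock at the same instant: half a tick period is split evenly across num_possible_cpus(). As a worked example (assuming HZ=250 and 4 CPUs): tick_period is 4,000,000 ns, half of it is 2,000,000 ns, so the per-CPU stride is 500,000 ns and CPU n fires its tick n * 0.5 ms late. A sketch of the arithmetic; tick_skew_ns is a made-up helper, and scaling by the cpu index is an assumption since the hunk is truncated before the offset is applied:

#include <asm/div64.h>

u64 tick_skew_ns(u64 tick_period_ns, unsigned int nr_cpus, unsigned int cpu)
{
        u64 offset = tick_period_ns >> 1;       /* half the tick period */

        do_div(offset, nr_cpus);                /* even stride per possible cpu */
        return offset * cpu;                    /* this cpu's stagger */
}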
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
deleted file mode 100644
index a9ae369925ce..000000000000
--- a/kernel/time/timecompare.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2009 Intel Corporation.
- * Author: Patrick Ohly <patrick.ohly@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/timecompare.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/math64.h>
-#include <linux/kernel.h>
-
-/*
- * fixed point arithmetic scale factor for skew
- *
- * Usually one would measure skew in ppb (parts per billion, 1e9), but
- * using a factor of 2 simplifies the math.
- */
-#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
-
-ktime_t timecompare_transform(struct timecompare *sync,
- u64 source_tstamp)
-{
- u64 nsec;
-
- nsec = source_tstamp + sync->offset;
- nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
- TIMECOMPARE_SKEW_RESOLUTION;
-
- return ns_to_ktime(nsec);
-}
-EXPORT_SYMBOL_GPL(timecompare_transform);
-
-int timecompare_offset(struct timecompare *sync,
- s64 *offset,
- u64 *source_tstamp)
-{
- u64 start_source = 0, end_source = 0;
- struct {
- s64 offset;
- s64 duration_target;
- } buffer[10], sample, *samples;
- int counter = 0, i;
- int used;
- int index;
- int num_samples = sync->num_samples;
-
- if (num_samples > ARRAY_SIZE(buffer)) {
- samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
- if (!samples) {
- samples = buffer;
- num_samples = ARRAY_SIZE(buffer);
- }
- } else {
- samples = buffer;
- }
-
- /* run until we have enough valid samples, but do not try forever */
- i = 0;
- counter = 0;
- while (1) {
- u64 ts;
- ktime_t start, end;
-
- start = sync->target();
- ts = timecounter_read(sync->source);
- end = sync->target();
-
- if (!i)
- start_source = ts;
-
- /* ignore negative durations */
- sample.duration_target = ktime_to_ns(ktime_sub(end, start));
- if (sample.duration_target >= 0) {
- /*
- * assume symetric delay to and from source:
- * average target time corresponds to measured
- * source time
- */
- sample.offset =
- (ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
- ts;
-
- /* simple insertion sort based on duration */
- index = counter - 1;
- while (index >= 0) {
- if (samples[index].duration_target <
- sample.duration_target)
- break;
- samples[index + 1] = samples[index];
- index--;
- }
- samples[index + 1] = sample;
- counter++;
- }
-
- i++;
- if (counter >= num_samples || i >= 100000) {
- end_source = ts;
- break;
- }
- }
-
- *source_tstamp = (end_source + start_source) / 2;
-
- /* remove outliers by only using 75% of the samples */
- used = counter * 3 / 4;
- if (!used)
- used = counter;
- if (used) {
- /* calculate average */
- s64 off = 0;
- for (index = 0; index < used; index++)
- off += samples[index].offset;
- *offset = div_s64(off, used);
- }
-
- if (samples && samples != buffer)
- kfree(samples);
-
- return used;
-}
-EXPORT_SYMBOL_GPL(timecompare_offset);
-
-void __timecompare_update(struct timecompare *sync,
- u64 source_tstamp)
-{
- s64 offset;
- u64 average_time;
-
- if (!timecompare_offset(sync, &offset, &average_time))
- return;
-
- if (!sync->last_update) {
- sync->last_update = average_time;
- sync->offset = offset;
- sync->skew = 0;
- } else {
- s64 delta_nsec = average_time - sync->last_update;
-
- /* avoid division by negative or small deltas */
- if (delta_nsec >= 10000) {
- s64 delta_offset_nsec = offset - sync->offset;
- s64 skew; /* delta_offset_nsec *
- TIMECOMPARE_SKEW_RESOLUTION /
- delta_nsec */
- u64 divisor;
-
- /* div_s64() is limited to 32 bit divisor */
- skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
- divisor = delta_nsec;
- while (unlikely(divisor >= ((s64)1) << 32)) {
- /* divide both by 2; beware, right shift
- of negative value has undefined
- behavior and can only be used for
- the positive divisor */
- skew = div_s64(skew, 2);
- divisor >>= 1;
- }
- skew = div_s64(skew, divisor);
-
- /*
- * Calculate new overall skew as 4/16 the
- * old value and 12/16 the new one. This is
- * a rather arbitrary tradeoff between
- * only using the latest measurement (0/16 and
- * 16/16) and even more weight on past measurements.
- */
-#define TIMECOMPARE_NEW_SKEW_PER_16 12
- sync->skew =
- div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
- sync->skew +
- TIMECOMPARE_NEW_SKEW_PER_16 * skew,
- 16);
- sync->last_update = average_time;
- sync->offset = offset;
- }
- }
-}
-EXPORT_SYMBOL_GPL(__timecompare_update);
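For the record, the blend that the deleted __timecompare_update() applied (4/16 old skew, 12/16 fresh measurement) is a simple exponentially weighted moving average: an old skew of 800 and a new measurement of 1600 blend to (4*800 + 12*1600)/16 = 1400. A standalone sketch of just that step; blend_skew is an illustrative name:

#include <stdint.h>

#define NEW_SKEW_PER_16 12      /* weight of the fresh measurement, out of 16 */

static int64_t blend_skew(int64_t old_skew, int64_t measured_skew)
{
        return ((16 - NEW_SKEW_PER_16) * old_skew +
                NEW_SKEW_PER_16 * measured_skew) / 16;
}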
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 69f5342e8d1c..cbc6acb0db3f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -26,12 +26,6 @@
static struct timekeeper timekeeper;
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
@@ -1349,9 +1343,7 @@ struct timespec get_monotonic_coarse(void)
}
/*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
+ * Must hold jiffies_lock
*/
void do_timer(unsigned long ticks)
{
@@ -1439,7 +1431,7 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}