-rw-r--r--   arch/arm64/kernel/vdso.c               2
-rw-r--r--   arch/ia64/kernel/time.c                4
-rw-r--r--   arch/powerpc/kernel/time.c             4
-rw-r--r--   arch/s390/kernel/time.c                2
-rw-r--r--   arch/tile/kernel/time.c                2
-rw-r--r--   arch/x86/kernel/vsyscall_gtod.c        2
-rw-r--r--   arch/x86/kvm/x86.c                     2
-rw-r--r--   include/linux/clocksource.h            2
-rw-r--r--   include/linux/timekeeper_internal.h    7
-rw-r--r--   kernel/time/timekeeping.c             23
10 files changed, 24 insertions, 26 deletions
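
Before the per-file hunks, a minimal standalone C sketch of the access-pattern change that recurs throughout the diff below: the last cycle value moves from struct clocksource into struct timekeeper, so every vsyscall/vdso update path copies tk->cycle_last instead of tk->clock->cycle_last. The reduced struct layouts and the snapshot_cycle_last() helper are illustrative stand-ins, not kernel code.

/* Illustrative model only; the real definitions live in
 * include/linux/clocksource.h and include/linux/timekeeper_internal.h. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

struct clocksource {
	cycle_t (*read)(struct clocksource *cs);
	cycle_t mask;              /* cycle_last is no longer kept here */
};

struct timekeeper {
	struct clocksource *clock;
	cycle_t cycle_last;        /* new home of the last cycle value */
	uint32_t mult;
	uint32_t shift;
};

/* Old pattern:  vdata->cycle_last = tk->clock->cycle_last;
 * New pattern:  vdata->cycle_last = tk->cycle_last;         */
static cycle_t snapshot_cycle_last(const struct timekeeper *tk)
{
	return tk->cycle_last;
}

int main(void)
{
	struct clocksource cs = { .read = NULL, .mask = ~(cycle_t)0 };
	struct timekeeper tk = { .clock = &cs, .cycle_last = 12345,
				 .mult = 1, .shift = 0 };

	printf("cycle_last = %" PRIu64 "\n", snapshot_cycle_last(&tk));
	return 0;
}
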
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 50384fec56c4..574672f001f7 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -224,7 +224,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
- vdso_data->cs_cycle_last = tk->clock->cycle_last;
+ vdso_data->cs_cycle_last = tk->cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->cs_mult = tk->mult;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 71c52bc7c28d..11dc42da7daf 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -441,7 +441,7 @@ void update_vsyscall_tz(void)
}
void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
- struct clocksource *c, u32 mult)
+ struct clocksource *c, u32 mult, cycle_t cycle_last)
{
write_seqcount_begin(&fsyscall_gtod_data.seq);
@@ -450,7 +450,7 @@ void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
fsyscall_gtod_data.clk_mult = mult;
fsyscall_gtod_data.clk_shift = c->shift;
fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
- fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+ fsyscall_gtod_data.clk_cycle_last = cycle_last;
/* copy kernel time structures */
fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 9fff9cdcc519..368ab374d33c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -741,7 +741,7 @@ static cycle_t timebase_read(struct clocksource *cs)
}
void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
- struct clocksource *clock, u32 mult)
+ struct clocksource *clock, u32 mult, cycle_t cycle_last)
{
u64 new_tb_to_xs, new_stamp_xsec;
u32 frac_sec;
@@ -774,7 +774,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
* We expect the caller to have done the first increment of
* vdso_data->tb_update_count already.
*/
- vdso_data->tb_orig_stamp = clock->cycle_last;
+ vdso_data->tb_orig_stamp = cycle_last;
vdso_data->stamp_xsec = new_stamp_xsec;
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = wtm->tv_sec;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0931b110c826..97950f392613 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -220,7 +220,7 @@ void update_vsyscall(struct timekeeper *tk)
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
- vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+ vdso_data->xtime_tod_stamp = tk->cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->wtom_clock_sec =
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index ae70155c2f16..d22d5bfc1e4e 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -269,7 +269,7 @@ void update_vsyscall(struct timekeeper *tk)
/* Userspace gettimeofday will spin while this value is odd. */
++vdso_data->tb_update_count;
smp_wmb();
- vdso_data->xtime_tod_stamp = clock->cycle_last;
+ vdso_data->xtime_tod_stamp = tk->cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->wtom_clock_sec = wtm->tv_sec;
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index 9531fbb123ba..c3cb3c144591 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -32,7 +32,7 @@ void update_vsyscall(struct timekeeper *tk)
/* copy vsyscall data */
vdata->vclock_mode = tk->clock->archdata.vclock_mode;
- vdata->cycle_last = tk->clock->cycle_last;
+ vdata->cycle_last = tk->cycle_last;
vdata->mask = tk->clock->mask;
vdata->mult = tk->mult;
vdata->shift = tk->shift;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63832f5110b6..7b25125f3f42 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1001,7 +1001,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
/* copy pvclock gtod data */
vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
- vdata->clock.cycle_last = tk->clock->cycle_last;
+ vdata->clock.cycle_last = tk->cycle_last;
vdata->clock.mask = tk->clock->mask;
vdata->clock.mult = tk->mult;
vdata->clock.shift = tk->shift;
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a16b497d5159..653f0e2b6ca9 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
- * @cycle_last: most recent cycle counter value seen by ::read()
* @owner: module reference, must be set by clocksource in modules
*/
struct clocksource {
@@ -171,7 +170,6 @@ struct clocksource {
* clocksource itself is cacheline aligned.
*/
cycle_t (*read)(struct clocksource *cs);
- cycle_t cycle_last;
cycle_t mask;
u32 mult;
u32 shift;
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 2e20275a7083..cb88096222c0 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,6 +29,8 @@
struct timekeeper {
/* Current clocksource used for timekeeping. */
struct clocksource *clock;
+ /* Last cycle value */
+ cycle_t cycle_last;
/* NTP adjusted clock multiplier */
u32 mult;
/* The shift value of the current clocksource. */
@@ -62,8 +64,6 @@ struct timekeeper {
/* Number of clock cycles in one NTP interval. */
cycle_t cycle_interval;
- /* Last cycle value (also stored in clock->cycle_last) */
- cycle_t cycle_last;
/* Number of clock shifted nano seconds in one NTP interval. */
u64 xtime_interval;
/* shifted nano seconds left over when rounding cycle_interval */
@@ -91,7 +91,8 @@ extern void update_vsyscall_tz(void);
#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
- struct clocksource *c, u32 mult);
+ struct clocksource *c, u32 mult,
+ cycle_t cycle_last);
extern void update_vsyscall_tz(void);
#else
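
The two CONFIG_GENERIC_TIME_VSYSCALL_OLD architectures (ia64 and powerpc, above) adapt mechanically: the core now passes the timekeeper's cycle_last as an extra argument, and the arch copies that value instead of dereferencing the clocksource. A reduced standalone sketch of that shape follows; the gtod_data bookkeeping and the timespec_sketch type are simplified stand-ins for the per-arch structures, not the actual ia64/powerpc code.

#include <stdint.h>

typedef uint64_t cycle_t;

struct clocksource { uint32_t mult; uint32_t shift; };
struct timespec_sketch { int64_t tv_sec; long tv_nsec; };

/* Stand-in for the per-arch gtod data consumed by the userspace fast path. */
static struct {
	cycle_t  clk_cycle_last;
	uint32_t clk_mult;
	uint32_t clk_shift;
} gtod_data;

/* Post-patch shape of the hook: cycle_last arrives from the core's
 * struct timekeeper instead of being read out of the clocksource. */
void update_vsyscall_old(struct timespec_sketch *wall,
			 struct timespec_sketch *wtm,
			 struct clocksource *c, uint32_t mult,
			 cycle_t cycle_last)
{
	(void)wall;
	(void)wtm;
	gtod_data.clk_mult       = mult;
	gtod_data.clk_shift      = c->shift;
	gtod_data.clk_cycle_last = cycle_last;  /* was: c->cycle_last */
}

int main(void)
{
	struct clocksource c = { .mult = 1, .shift = 0 };
	struct timespec_sketch wall = { 0, 0 }, wtm = { 0, 0 };

	update_vsyscall_old(&wall, &wtm, &c, c.mult, 12345 /* tk->cycle_last */);
	return gtod_data.clk_cycle_last == 12345 ? 0 : 1;
}
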
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 531805013786..4e748c404749 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -121,7 +121,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
old_clock = tk->clock;
tk->clock = clock;
- tk->cycle_last = clock->cycle_last = clock->read(clock);
+ tk->cycle_last = clock->read(clock);
/* Do the ns -> cycle conversion first, using original mult */
tmp = NTP_INTERVAL_LENGTH;
@@ -182,7 +182,7 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
cycle_now = clock->read(clock);
/* calculate the delta since the last update_wall_time: */
- delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask);
+ delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
nsec = delta * tk->mult + tk->xtime_nsec;
nsec >>= tk->shift;
@@ -202,7 +202,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
cycle_now = clock->read(clock);
/* calculate the delta since the last update_wall_time: */
- delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask);
+ delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
/* convert delta to nanoseconds. */
nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -218,7 +218,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
struct timespec xt;
xt = tk_xtime(tk);
- update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
+ update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult,
+ tk->cycle_last);
}
static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -342,8 +343,8 @@ static void timekeeping_forward_now(struct timekeeper *tk)
clock = tk->clock;
cycle_now = clock->read(clock);
- delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask);
- tk->cycle_last = clock->cycle_last = cycle_now;
+ delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
+ tk->cycle_last = cycle_now;
tk->xtime_nsec += delta * tk->mult;
@@ -1020,13 +1021,13 @@ static void timekeeping_resume(void)
*/
cycle_now = clock->read(clock);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
- cycle_now > clock->cycle_last) {
+ cycle_now > tk->cycle_last) {
u64 num, max = ULLONG_MAX;
u32 mult = clock->mult;
u32 shift = clock->shift;
s64 nsec = 0;
- cycle_delta = clocksource_delta(cycle_now, clock->cycle_last,
+ cycle_delta = clocksource_delta(cycle_now, tk->cycle_last,
clock->mask);
/*
@@ -1053,7 +1054,7 @@ static void timekeeping_resume(void)
__timekeeping_inject_sleeptime(tk, &ts_delta);
/* Re-base the last cycle value */
- tk->cycle_last = clock->cycle_last = cycle_now;
+ tk->cycle_last = cycle_now;
tk->ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1433,7 +1434,7 @@ void update_wall_time(void)
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
- offset = clocksource_delta(clock->read(clock), clock->cycle_last,
+ offset = clocksource_delta(clock->read(clock), tk->cycle_last,
clock->mask);
#endif
@@ -1477,8 +1478,6 @@ void update_wall_time(void)
clock_set |= accumulate_nsecs_to_secs(tk);
write_seqcount_begin(&tk_core.seq);
- /* Update clock->cycle_last with the new value */
- clock->cycle_last = tk->cycle_last;
/*
* Update the real timekeeper.
*