| author | Brian Norris <computersforpeace@gmail.com> | 2014-08-19 11:57:23 -0700 |
| --- | --- | --- |
| committer | Brian Norris <computersforpeace@gmail.com> | 2014-08-19 11:57:23 -0700 |
| commit | 5b49ab3e03f68eb49db4bce6290e5707b7f6c6f3 (patch) | |
| tree | 090c7c069bc6c0f2b368ed8d0af861c275525411 /arch/x86/kernel/tsc.c | |
| parent | b25046b1e5e3f1423434da77ccc859f2f779d1ce (diff) | |
| parent | 54ea17a597b00e46b3720e75dd7595cd5dfa5670 (diff) | |
Merge l2-mtd/next into l2-mtd/master
Diffstat (limited to 'arch/x86/kernel/tsc.c')
-rw-r--r-- | arch/x86/kernel/tsc.c | 28 |
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ea030319b321..b6025f9e36c6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -234,9 +234,6 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 	return ns;
 }
 
-/* XXX surely we already have this someplace in the kernel?! */
-#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))
-
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
 	unsigned long long tsc_now, ns_now;
@@ -259,7 +256,9 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	 * time function is continuous; see the comment near struct
 	 * cyc2ns_data.
 	 */
-	data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
+	data->cyc2ns_mul =
+		DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR,
+				  cpu_khz);
 	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
 	data->cyc2ns_offset = ns_now -
 		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
@@ -951,7 +950,7 @@ core_initcall(cpufreq_tsc);
 static struct clocksource clocksource_tsc;
 
 /*
- * We compare the TSC to the cycle_last value in the clocksource
+ * We used to compare the TSC to the cycle_last value in the clocksource
  * structure to avoid a nasty time-warp. This can be observed in a
  * very small window right after one CPU updated cycle_last under
  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
@@ -961,26 +960,23 @@ static struct clocksource clocksource_tsc;
  * due to the unsigned delta calculation of the time keeping core
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
+ *
+ * This sanity check is now done in the core timekeeping code.
+ * checking the result of read_tsc() - cycle_last for being negative.
+ * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
 static cycle_t read_tsc(struct clocksource *cs)
 {
-	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
-}
-
-static void resume_tsc(struct clocksource *cs)
-{
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
-		clocksource_tsc.cycle_last = 0;
+	return (cycle_t)get_cycles();
 }
 
+/*
+ * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
+ */
 static struct clocksource clocksource_tsc = {
 	.name			= "tsc",
 	.rating			= 300,
 	.read			= read_tsc,
-	.resume			= resume_tsc,
 	.mask			= CLOCKSOURCE_MASK(64),
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
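Two things stand out in these hunks: the ad-hoc local DIV_ROUND macro is replaced by the kernel's DIV_ROUND_CLOSEST(), which computes the same round-to-nearest division for these unsigned, positive operands, and read_tsc() stops clamping against cycle_last because, per the new comment, the core timekeeping code now catches a negative `now - cycle_last` delta itself, which only works when `.mask` is the full CLOCKSOURCE_MASK(64). The sketch below is a minimal userspace illustration of both points, not kernel code: the helper names (div_round_closest_u64, delta_or_zero), the 2.4 GHz cpu_khz value, and the assumption that CYC2NS_SCALE_FACTOR is 10 are all supplied for the example.

```c
/*
 * Minimal userspace sketch, not kernel code. The helper names and the
 * sample cpu_khz value are made up for this illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Round-to-nearest division for unsigned operands. For positive inputs
 * this is exactly what the removed local DIV_ROUND macro computed, and
 * what the kernel's DIV_ROUND_CLOSEST() reduces to.
 */
static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

/*
 * The check described in the new comment block: with a full 64-bit mask
 * (CLOCKSOURCE_MASK(64)), "now - last" keeps every bit, so a reading that
 * went backwards shows up with the top bit set -- negative when viewed as
 * signed -- and can be clamped to zero by the core instead of by read_tsc().
 */
static uint64_t delta_or_zero(uint64_t now, uint64_t last)
{
	uint64_t delta = now - last;	/* mask is all ones, nothing stripped */

	return (int64_t)delta >= 0 ? delta : 0;
}

int main(void)
{
	/*
	 * cyc2ns_mul-style computation, assuming CYC2NS_SCALE_FACTOR == 10
	 * and NSEC_PER_MSEC == 1000000; a 2.4 GHz cpu_khz is arbitrary.
	 */
	uint64_t cpu_khz = 2400000;
	printf("cyc2ns_mul = %llu\n",
	       (unsigned long long)div_round_closest_u64(1000000ULL << 10,
							  cpu_khz));

	/* A forward delta passes through; a backward read is clamped to 0. */
	assert(delta_or_zero(1000, 990) == 10);
	assert(delta_or_zero(990, 1000) == 0);
	return 0;
}
```

The removal of resume_tsc() follows from the same change: its only job was to zero cycle_last after S3 on parts without X86_FEATURE_NONSTOP_TSC_S3 so the old comparison would not return a stale value, and once read_tsc() no longer consults cycle_last there is nothing left for it to do.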