author	Linus Torvalds <torvalds@linux-foundation.org>	2017-04-02 09:25:10 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-04-02 09:25:10 -0700
commit	128c434a70996a3738c7ca4aa2ee91942e4c48f0
tree	65646fafea62ef939e49ef57a31edec4c5b049fb /arch/x86
parent	0a89b5eb818aee2efe8b092bb303aec56486a3d3
parent	658b299580da2830a69eea44a8b6da1c2579a32e
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:
 "This update provides:

   - make the scheduler clock switch to unstable mode smooth so the
     timestamps stay at microseconds granularity instead of switching
     to tick granularity.

   - unbreak perf test tsc by taking the new offset into account which
     was added in order to provide better sched clock continuity

   - switching sched clock to unstable mode runs all clock related
     computations which affect the sched clock output itself from a
     work queue. In case of preemption sched clock uses half updated
     data and provides wrong timestamps. Keep the math in the protected
     context and delegate only the static key switch to workqueue
     context.

   - remove a duplicate header include"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/headers: Remove duplicate #include <linux/sched/debug.h> line
  sched/clock: Fix broken stable to unstable transfer
  sched/clock, x86/perf: Fix "perf test tsc"
  sched/clock: Fix clear_sched_clock_stable() preempt wobbly
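All of these fixes revolve around the same linear model the x86 code uses to turn TSC cycles into nanoseconds, f(x) = a + b*x, carried as a (mul, shift, offset) triple in struct cyc2ns_data. As a rough, non-kernel sketch of that evaluation (the helper name cyc_to_ns is illustrative, not a kernel symbol; the quot/rem split matches the overflow-avoiding scheme documented in include/uapi/linux/perf_event.h):

#include <stdint.h>

/*
 * ns = offset + (cyc * mul) >> shift, evaluated as quotient and
 * remainder relative to 2^shift so the 64-bit intermediate product
 * does not overflow for large cycle counts.
 */
static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mul, uint32_t shift,
			  uint64_t offset)
{
	uint64_t quot = cyc >> shift;
	uint64_t rem  = cyc & (((uint64_t)1 << shift) - 1);

	return offset + quot * mul + ((rem * mul) >> shift);
}

The "perf test tsc" fix below amounts to folding __sched_clock_offset into the offset term, so this mapping as exported through the userpage stays continuous with the kernel's own sched_clock().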
Diffstat (limited to 'arch/x86')
 arch/x86/events/core.c       | 9 ++++++---
 arch/x86/include/asm/timer.h | 2 ++
 arch/x86/kernel/tsc.c        | 4 ++--
 3 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2aa1ad194db2..580b60f5ac83 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2256,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 			       struct perf_event_mmap_page *userpg, u64 now)
 {
 	struct cyc2ns_data *data;
+	u64 offset;
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
@@ -2263,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!sched_clock_stable())
+	if (!using_native_sched_clock() || !sched_clock_stable())
 		return;
 
 	data = cyc2ns_read_begin();
 
+	offset = data->cyc2ns_offset + __sched_clock_offset;
+
 	/*
 	 * Internal timekeeping for enabled/running/stopped times
 	 * is always in the local_clock domain.
@@ -2275,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time = 1;
 	userpg->time_mult = data->cyc2ns_mul;
 	userpg->time_shift = data->cyc2ns_shift;
-	userpg->time_offset = data->cyc2ns_offset - now;
+	userpg->time_offset = offset - now;
 
 	/*
 	 * cap_user_time_zero doesn't make sense when we're using a different
@@ -2283,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	if (!event->attr.use_clockid) {
 		userpg->cap_user_time_zero = 1;
-		userpg->time_zero = data->cyc2ns_offset;
+		userpg->time_zero = offset;
 	}
 
 	cyc2ns_read_end(data);
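For context, a hedged consumer-side sketch, assuming a standard perf ring-buffer setup: user space reads the fields written above out of struct perf_event_mmap_page under the page's lock seqcount, then applies the same mul/shift/offset math to a raw TSC read. The retry loop follows the scheme documented in include/uapi/linux/perf_event.h; mmap setup and error handling are elided, and tsc_to_perf_time is an illustrative name.

#include <stdint.h>
#include <linux/perf_event.h>	/* struct perf_event_mmap_page */
#include <x86intrin.h>		/* __rdtsc() */

static uint64_t tsc_to_perf_time(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq, mult, shift;
	uint64_t tsc, zero, quot, rem;

	do {
		seq = pc->lock;
		__sync_synchronize();

		if (!pc->cap_user_time_zero)
			return 0;	/* not usable, e.g. attr.use_clockid set */
		mult  = pc->time_mult;
		shift = pc->time_shift;
		zero  = pc->time_zero;	/* the 'offset' computed above */
		tsc   = __rdtsc();

		__sync_synchronize();
	} while (pc->lock != seq);	/* retry if the kernel updated the page */

	/* Overflow-safe ns = zero + (tsc * mult) >> shift */
	quot = tsc >> shift;
	rem  = tsc & (((uint64_t)1 << shift) - 1);
	return zero + quot * mult + ((rem * mult) >> shift);
}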
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index a04eabd43d06..27e9f9d769b8 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -12,6 +12,8 @@ extern int recalibrate_cpu_khz(void);
 
 extern int no_timer_check;
 
+extern bool using_native_sched_clock(void);
+
 /*
  * We use the full linear equation: f(x) = a + b*x, in order to allow
  * a continuous function in the face of dynamic freq changes.
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c73a7f9e881a..714dfba6a1e7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -328,7 +328,7 @@ unsigned long long sched_clock(void)
 	return paravirt_sched_clock();
 }
 
-static inline bool using_native_sched_clock(void)
+bool using_native_sched_clock(void)
 {
 	return pv_time_ops.sched_clock == native_sched_clock;
 }
@@ -336,7 +336,7 @@ static inline bool using_native_sched_clock(void)
 unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 
-static inline bool using_native_sched_clock(void) { return true; }
+bool using_native_sched_clock(void) { return true; }
 #endif
 
 int check_tsc_unstable(void)