author     Marcelo Tosatti <mtosatti@redhat.com>  2012-11-27 23:28:56 -0200
committer  Marcelo Tosatti <mtosatti@redhat.com>  2012-11-27 23:29:10 -0200
commit     3dc4f7cfb7441e5e0fed3a02fc81cdaabd28300a (patch)
tree       aa4061837bded4fdb3bb4d879f5ef7797675f570 /arch/x86
parent     71056ae22d43f58d7e0f793af18ace2eaf5b74eb (diff)
x86: kvm guest: pvclock vsyscall support

Hook into generic pvclock vsyscall code, with the aim of allowing
userspace to have visibility into pvclock data.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
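For context, the payoff of exposing the per-vcpu pvclock area to userspace is that a vDSO-style reader can compute the time without entering the kernel. Below is a minimal, illustrative sketch of such a reader in C for x86-64. It assumes the pvclock_vcpu_time_info ABI layout and that the structure has already been mapped read-only into the reader's address space (which is what the generic vsyscall code hooked up by this patch arranges); the struct mirror, read_tsc() and scale_pvclock_delta() are illustrative names and are not part of this patch.

/*
 * Illustrative userspace-style pvclock reader (not part of this patch).
 * Assumes the kernel's pvclock_vcpu_time_info ABI and that the structure
 * is already mapped read-only into the reader's address space.
 */
#include <stdint.h>

struct pvclock_vcpu_time_info {		/* mirrors the kernel ABI layout */
	uint32_t version;
	uint32_t pad0;
	uint64_t tsc_timestamp;
	uint64_t system_time;
	uint32_t tsc_to_system_mul;
	int8_t   tsc_shift;
	uint8_t  flags;
	uint8_t  pad[2];
} __attribute__((packed));

static inline uint64_t read_tsc(void)
{
	uint32_t lo, hi;
	/* rdtscp would also return the cpu number; plain rdtsc kept for brevity */
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Scale a TSC delta to nanoseconds using the pvclock mul/shift pair. */
static inline uint64_t scale_pvclock_delta(uint64_t delta, uint32_t mul, int8_t shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (uint64_t)(((__uint128_t)delta * mul) >> 32);
}

uint64_t pvclock_read_ns(const volatile struct pvclock_vcpu_time_info *pvti)
{
	uint32_t version;
	uint64_t ns;

	do {
		version = pvti->version;	/* odd value: update in progress */
		__asm__ __volatile__("" ::: "memory");
		ns = pvti->system_time +
		     scale_pvclock_delta(read_tsc() - pvti->tsc_timestamp,
					 pvti->tsc_to_system_mul, pvti->tsc_shift);
		__asm__ __volatile__("" ::: "memory");
	} while ((version & 1) || version != pvti->version);

	return ns;
}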
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_guest.h |  6
-rw-r--r--  arch/x86/kernel/kvm.c            | 13
-rw-r--r--  arch/x86/kernel/kvmclock.c       | 54
3 files changed, 61 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
new file mode 100644
index 000000000000..a92b1763c419
--- /dev/null
+++ b/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 4180a874c764..a91c6b482b48 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -42,6 +42,7 @@
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
+#include <asm/kvm_guest.h>
static int kvmapf = 1;
@@ -62,6 +63,15 @@ static int parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
+static int kvmclock_vsyscall = 1;
+static int parse_no_kvmclock_vsyscall(char *arg)
+{
+	kvmclock_vsyscall = 0;
+	return 0;
+}
+
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
@@ -471,6 +481,9 @@ void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
apic_set_eoi_write(kvm_guest_apic_eoi_write);
+	if (kvmclock_vsyscall)
+		kvm_setup_vsyscall_timeinfo();
+
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
register_cpu_notifier(&kvm_cpu_notifier);
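The new no-kvmclock-vsyscall parameter mirrors the existing no-kvmclock handling: it is an early_param, so the vsyscall time path can be disabled from the kernel command line without turning off kvmclock itself. A hypothetical command line (root device and console are placeholder values) would be:

	root=/dev/vda1 console=ttyS0 no-kvmclock-vsyscall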
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c7d75678886e..220a360010f8 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -40,11 +40,7 @@ static int parse_no_kvmclock(char *arg)
early_param("no-kvmclock", parse_no_kvmclock);
/* The hypervisor will put information about time periodically here */
-struct pvclock_aligned_vcpu_time_info {
-	struct pvclock_vcpu_time_info clock;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-static struct pvclock_aligned_vcpu_time_info *hv_clock;
+static struct pvclock_vsyscall_time_info *hv_clock;
static struct pvclock_wall_clock wall_clock;
/*
@@ -67,7 +63,7 @@ static unsigned long kvm_get_wallclock(void)
preempt_disable();
cpu = smp_processor_id();
- vcpu_time = &hv_clock[cpu].clock;
+ vcpu_time = &hv_clock[cpu].pvti;
pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
preempt_enable();
@@ -88,7 +84,7 @@ static cycle_t kvm_clock_read(void)
preempt_disable_notrace();
cpu = smp_processor_id();
- src = &hv_clock[cpu].clock;
+ src = &hv_clock[cpu].pvti;
ret = pvclock_clocksource_read(src);
preempt_enable_notrace();
return ret;
@@ -116,7 +112,7 @@ static unsigned long kvm_get_tsc_khz(void)
preempt_disable();
cpu = smp_processor_id();
- src = &hv_clock[cpu].clock;
+ src = &hv_clock[cpu].pvti;
tsc_khz = pvclock_tsc_khz(src);
preempt_enable();
return tsc_khz;
@@ -143,7 +139,7 @@ bool kvm_check_and_clear_guest_paused(void)
if (!hv_clock)
return ret;
- src = &hv_clock[cpu].clock;
+ src = &hv_clock[cpu].pvti;
if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
src->flags &= ~PVCLOCK_GUEST_STOPPED;
ret = true;
@@ -164,7 +160,7 @@ int kvm_register_clock(char *txt)
{
int cpu = smp_processor_id();
int low, high, ret;
- struct pvclock_vcpu_time_info *src = &hv_clock[cpu].clock;
+ struct pvclock_vcpu_time_info *src = &hv_clock[cpu].pvti;
low = (int)__pa(src) | 1;
high = ((u64)__pa(src) >> 32);
@@ -235,7 +231,7 @@ void __init kvmclock_init(void)
printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
- mem = memblock_alloc(sizeof(struct pvclock_aligned_vcpu_time_info) * NR_CPUS,
+ mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS,
PAGE_SIZE);
if (!mem)
return;
@@ -244,7 +240,7 @@ void __init kvmclock_init(void)
if (kvm_register_clock("boot clock")) {
hv_clock = NULL;
memblock_free(mem,
- sizeof(struct pvclock_aligned_vcpu_time_info)*NR_CPUS);
+ sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
return;
}
pv_time_ops.sched_clock = kvm_clock_read;
@@ -269,3 +265,37 @@ void __init kvmclock_init(void)
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
}
+
+int __init kvm_setup_vsyscall_timeinfo(void)
+{
+#ifdef CONFIG_X86_64
+	int cpu;
+	int ret;
+	u8 flags;
+	struct pvclock_vcpu_time_info *vcpu_time;
+	unsigned int size;
+
+	size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+
+	vcpu_time = &hv_clock[cpu].pvti;
+	flags = pvclock_read_flags(vcpu_time);
+
+	if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
+		preempt_enable();
+		return 1;
+	}
+
+	if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
+		preempt_enable();
+		return ret;
+	}
+
+	preempt_enable();
+
+	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
+	return 0;
+}
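A note on the design of kvm_setup_vsyscall_timeinfo(): the fast path is only advertised, by switching kvm_clock.archdata.vclock_mode to VCLOCK_PVCLOCK, when the host has set PVCLOCK_TSC_STABLE_BIT, since a userspace reader cannot take the kernel's fallback paths if the per-vcpu clocks are not guaranteed to stay monotonic across vcpus. If the bit is clear the function returns 1, vclock_mode is left at its default, and clock_gettime() keeps going through the normal syscall. pvclock_init_vsyscall() is the generic-side hook referred to in the commit message; it is what makes the hv_clock array reachable from userspace.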