author    Thomas Gleixner <tglx@linutronix.de>    2009-08-20 17:06:25 +0200
committer Thomas Gleixner <tglx@linutronix.de>    2009-08-31 09:35:47 +0200
commit 2d826404f0bdcac2a4dd7e3c446b70d6a3b63b78 (patch)
tree   7db2dc0fbde3a25a89f1fc1514152567f612ccde /arch/x86/include
parent 47926214d8b2bef13b2be57c500194a804f16198 (diff)
x86: Move tsc_calibration to x86_init_ops
TSC calibration is modified by the vmware hypervisor and paravirt by
separate means. Moorestown wants to add its own calibration routine as
well. So make calibrate_tsc a proper x86_init_ops function and override
it by paravirt or by the early setup of the vmware hypervisor.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
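For illustration, a minimal sketch of the override pattern this enables. The
default ops table and the body of vmware_platform_setup() below are assumptions
inferred from the declarations in this patch; the corresponding .c changes are
outside this diffstat:

#include <linux/init.h>
#include <asm/tsc.h>
#include <asm/x86_init.h>

/* Sketch of the default platform ops: bare metal keeps using the
 * native calibration routine. */
struct x86_platform_ops x86_platform = {
	.calibrate_tsc = native_calibrate_tsc,
};

/* A hypervisor's early platform setup can then simply replace the hook.
 * The body below is a placeholder; a real implementation would return
 * the TSC frequency reported by the hypervisor. */
static unsigned long vmware_get_tsc_khz(void)
{
	return 0;	/* placeholder for the hypervisor-reported frequency in kHz */
}

void __init vmware_platform_setup(void)
{
	x86_platform.calibrate_tsc = vmware_get_tsc_khz;
}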
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/hypervisor.h  2
-rw-r--r--  arch/x86/include/asm/paravirt.h    1
-rw-r--r--  arch/x86/include/asm/timer.h       5
-rw-r--r--  arch/x86/include/asm/tsc.h         3
-rw-r--r--  arch/x86/include/asm/vmware.h      2
-rw-r--r--  arch/x86/include/asm/x86_init.h    9
6 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 369f5c5d09a1..b78c0941e422 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,7 +20,7 @@
#ifndef ASM_X86__HYPERVISOR_H
#define ASM_X86__HYPERVISOR_H
-extern unsigned long get_hypervisor_tsc_freq(void);
extern void init_hypervisor(struct cpuinfo_x86 *c);
+extern void init_hypervisor_platform(void);
#endif
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 11a4ba7b209c..1e458a553303 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -210,7 +210,6 @@ static inline unsigned long long paravirt_sched_clock(void)
{
return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
-#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
static inline unsigned long long paravirt_read_pmc(int counter)
{
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 65228ccc5f0d..5469630b27f5 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -8,7 +8,6 @@
#define TICK_SIZE (tick_nsec / 1000)
unsigned long long native_sched_clock(void);
-unsigned long native_calibrate_tsc(void);
extern int recalibrate_cpu_khz(void);
#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
@@ -19,10 +18,6 @@ extern int timer_ack;
extern int no_timer_check;
-#ifndef CONFIG_PARAVIRT
-#define calibrate_tsc() native_calibrate_tsc()
-#endif
-
/* Accelerators for sched_clock()
* convert from cycles(64bits) => nanoseconds (64bits)
* basic equation:
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..c0427295e8f5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -48,7 +48,8 @@ static __always_inline cycles_t vget_cycles(void)
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
-int check_tsc_unstable(void);
+extern int check_tsc_unstable(void);
+extern unsigned long native_calibrate_tsc(void);
/*
* Boot-time check whether the TSCs are synchronized across
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index c11b7e100d83..e49ed6d2fd4e 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -20,7 +20,7 @@
#ifndef ASM_X86__VMWARE_H
#define ASM_X86__VMWARE_H
-extern unsigned long vmware_get_tsc_khz(void);
+extern void vmware_platform_setup(void);
extern int vmware_platform(void);
extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index f8bdd2271a04..20df51871713 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -112,8 +112,17 @@ struct x86_cpuinit_ops {
void (*setup_percpu_clockev)(void);
};
+/**
+ * struct x86_platform_ops - platform specific runtime functions
+ * @calibrate_tsc: calibrate TSC
+ */
+struct x86_platform_ops {
+ unsigned long (*calibrate_tsc)(void);
+};
+
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
+extern struct x86_platform_ops x86_platform;
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
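For context, a sketch of the consumer side of the new hook, assuming the
calibration call site in arch/x86/kernel/tsc.c (not part of this diffstat) is
switched from the old calibrate_tsc() macro to x86_platform.calibrate_tsc():

#include <linux/init.h>
#include <asm/tsc.h>
#include <asm/x86_init.h>

void __init tsc_init(void)
{
	/* Single call site: whichever calibrate_tsc the platform installed
	 * (native, paravirt, or hypervisor setup) is used here. */
	tsc_khz = x86_platform.calibrate_tsc();
	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}
	/* remainder of TSC setup unchanged */
}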