author     Alok Kataria <akataria@vmware.com>   2008-06-20 15:06:33 -0700
committer  Ingo Molnar <mingo@elte.hu>          2008-06-23 22:51:33 +0200
commit     3da757daf86e498872855f0b5e101f763ba79499 (patch)
tree       bffffafed7322c66a1b886b661cfd8a8a7f5a924
parent     e01b70ef3eb3080fecc35e15f68cd274c0a48163 (diff)
x86: use cpu_khz for loops_per_jiffy calculation
On the x86 platform we can use the value of tsc_khz computed during TSC calibration to calculate the loops_per_jiffy value. It is very important to keep the error in lpj values to a minimum, as any error there may result in a kernel panic in check_timer. In a virtualization environment, on a highly overloaded host the guest delay calibration may sometimes produce errors beyond the ~50% that timer_irq_works can handle, resulting in the guest panicking.

Also makes some formatting changes to the lpj_setup code so that a single printk prints the BogoMIPS value.

We do this only for the boot processor, because the APs can have different base frequencies, or the BIOS might boot an AP at a different frequency.

Signed-off-by: Alok N Kataria <akataria@vmware.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Daniel Hecht <dhecht@vmware.com>
Cc: Tim Mann <mann@vmware.com>
Cc: Zach Amsden <zach@vmware.com>
Cc: Sahil Rihan <srihan@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
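
Note (not part of the patch): a minimal standalone C sketch of the arithmetic this change relies on, seeding loops_per_jiffy from the calibrated TSC frequency as tsc_khz * 1000 / HZ and printing the BogoMIPS figure the same way calibrate_delay() formats it. The TSC frequency and HZ values below are assumptions chosen only for illustration.

	/* Hypothetical userspace sketch; mirrors lpj = tsc_khz * 1000 / HZ. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t tsc_khz = 2400000;		/* assumed 2.4 GHz TSC */
		unsigned int hz = 250;			/* assumed CONFIG_HZ value */
		uint64_t lpj = tsc_khz * 1000 / hz;	/* TSC cycles per jiffy */

		printf("lpj=%llu -> %llu.%02llu BogoMIPS\n",
		       (unsigned long long)lpj,
		       (unsigned long long)(lpj / (500000 / hz)),
		       (unsigned long long)((lpj / (5000 / hz)) % 100));
		return 0;
	}

With the assumed values this prints lpj=9600000 -> 4800.00 BogoMIPS, i.e. the same figure the kernel would otherwise obtain from the timed delay loop.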
-rw-r--r--  arch/x86/kernel/time_64.c  |  2
-rw-r--r--  arch/x86/kernel/tsc_32.c   |  5
-rw-r--r--  include/linux/delay.h      |  1
-rw-r--r--  init/calibrate.c           | 36
4 files changed, 27 insertions, 17 deletions
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index c737849e2ef7..12b4a71bd074 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -123,6 +123,8 @@ void __init time_init(void)
 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
 		cpu_khz = calculate_cpu_khz();
+	lpj_tsc = ((unsigned long)tsc_khz * 1000)/HZ;
+
 	if (unsynchronized_tsc())
 		mark_tsc_unstable("TSCs unsynchronized");
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 068759db63dd..be729035b30b 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -401,6 +401,7 @@ static inline void check_geode_tsc_reliable(void) { }
 void __init tsc_init(void)
 {
 	int cpu;
+	u64 lpj;
 	if (!cpu_has_tsc || tsc_disabled) {
 		/* Disable the TSC in case of !cpu_has_tsc */
@@ -421,6 +422,10 @@ void __init tsc_init(void)
 		return;
 	}
+	lpj = ((u64)tsc_khz * 1000);
+	do_div(lpj, HZ);
+	lpj_tsc = lpj;
+
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
 			(unsigned long)cpu_khz % 1000);
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 54552d21296e..01aec60590ab 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -41,6 +41,7 @@ static inline void ndelay(unsigned long x)
#define ndelay(x) ndelay(x)
#endif
+extern unsigned long lpj_tsc;
void calibrate_delay(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
diff --git a/init/calibrate.c b/init/calibrate.c
index ecb3822d4f70..86286974dada 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -8,7 +8,9 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
+#include <linux/smp.h>
+unsigned long lpj_tsc;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
@@ -108,6 +110,10 @@ static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
* This is the number of bits of precision for the loops_per_jiffy. Each
* bit takes on average 1.5/HZ seconds. This (like the original) is a little
* better than 1%
+ * For the boot cpu we can skip the delay calibration and assign it a value
+ * calculated based on the tsc frequency.
+ * For the rest of the CPUs we cannot assume that the tsc frequency is same as
+ * the cpu frequency, hence do the calibration for those.
*/
#define LPS_PREC 8
@@ -118,20 +124,20 @@ void __cpuinit calibrate_delay(void)
 	if (preset_lpj) {
 		loops_per_jiffy = preset_lpj;
-		printk("Calibrating delay loop (skipped)... "
-			"%lu.%02lu BogoMIPS preset\n",
-			loops_per_jiffy/(500000/HZ),
-			(loops_per_jiffy/(5000/HZ)) % 100);
+		printk(KERN_INFO
+			"Calibrating delay loop (skipped) preset value.. ");
+	} else if ((smp_processor_id() == 0) && lpj_tsc) {
+		loops_per_jiffy = lpj_tsc;
+		printk(KERN_INFO
+			"Calibrating delay loop (skipped), "
+			"using tsc calculated value.. ");
 	} else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
-		printk("Calibrating delay using timer specific routine.. ");
-		printk("%lu.%02lu BogoMIPS (lpj=%lu)\n",
-			loops_per_jiffy/(500000/HZ),
-			(loops_per_jiffy/(5000/HZ)) % 100,
-			loops_per_jiffy);
+		printk(KERN_INFO
+			"Calibrating delay using timer specific routine.. ");
 	} else {
 		loops_per_jiffy = (1<<12);
-		printk(KERN_DEBUG "Calibrating delay loop... ");
+		printk(KERN_INFO "Calibrating delay loop... ");
 		while ((loops_per_jiffy <<= 1) != 0) {
 			/* wait for "start of" clock tick */
 			ticks = jiffies;
@@ -161,12 +167,8 @@ void __cpuinit calibrate_delay(void)
 			if (jiffies != ticks)	/* longer than 1 tick */
 				loops_per_jiffy &= ~loopbit;
 		}
-
-		/* Round the value and print it */
-		printk("%lu.%02lu BogoMIPS (lpj=%lu)\n",
-			loops_per_jiffy/(500000/HZ),
-			(loops_per_jiffy/(5000/HZ)) % 100,
-			loops_per_jiffy);
 	}
-
+	printk(KERN_INFO "%lu.%02lu BogoMIPS (lpj=%lu)\n",
+			loops_per_jiffy/(500000/HZ),
+			(loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
 }
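
For context, a compact userspace sketch of the fallback path this patch leaves in place for CPUs without a usable lpj_tsc: grow loops_per_jiffy by doubling until one delay of that many loops outlasts a clock tick, then refine it one bit at a time (LPS_PREC bits). sim_jiffies() and sim_delay() below are hypothetical stand-ins for the kernel's jiffies counter and __delay(); the sketch only illustrates the algorithm and is not the kernel implementation.

	/* Illustrative-only sketch of the two-stage delay calibration. */
	#include <stdio.h>
	#include <time.h>

	#define SIM_HZ   250		/* assumed tick rate */
	#define LPS_PREC 8		/* bits of refinement, as in calibrate.c */

	static unsigned long sim_jiffies(void)	/* stand-in for jiffies */
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (unsigned long)ts.tv_sec * SIM_HZ +
		       (unsigned long)(ts.tv_nsec / (1000000000L / SIM_HZ));
	}

	static void sim_delay(unsigned long loops)	/* stand-in for __delay() */
	{
		while (loops--)
			__asm__ __volatile__("" ::: "memory");
	}

	int main(void)
	{
		unsigned long lpj = 1UL << 12, loopbit, ticks;
		int prec = LPS_PREC;

		/* Coarse stage: double lpj until the delay spans more than one tick. */
		while ((lpj <<= 1) != 0) {
			ticks = sim_jiffies();
			while (ticks == sim_jiffies())
				;			/* wait for a tick edge */
			ticks = sim_jiffies();
			sim_delay(lpj);
			if (sim_jiffies() != ticks)
				break;
		}

		/* Fine stage: binary-refine the top LPS_PREC bits of lpj. */
		lpj >>= 1;
		loopbit = lpj;
		while (prec-- && (loopbit >>= 1)) {
			lpj |= loopbit;
			ticks = sim_jiffies();
			while (ticks == sim_jiffies())
				;
			ticks = sim_jiffies();
			sim_delay(lpj);
			if (sim_jiffies() != ticks)	/* longer than one tick */
				lpj &= ~loopbit;
		}

		printf("%lu.%02lu BogoMIPS (lpj=%lu)\n",
		       lpj / (500000 / SIM_HZ),
		       (lpj / (5000 / SIM_HZ)) % 100, lpj);
		return 0;
	}

This calibration costs many ticks of boot time and is sensitive to lost ticks, which is exactly why the patch seeds the boot CPU's loops_per_jiffy from the already-calibrated TSC frequency instead.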