author		Ingo Molnar <mingo@elte.hu>	2008-07-30 10:13:35 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-07-31 17:20:49 +0200
commit		18e4e36c66d6edbdefc639692206cdf01e468713
tree		271a70892fa5808459a1cfc30aa3b5631cd6647d
parent		50526968e99afbca34924abcb04658b6dd5c5ea5
sched: eliminate scd->prev_raw
eliminate prev_raw and use tick_raw instead. It's enough to base the
current time on the scheduler tick timestamp alone - the monotonicity
and maximum checks will prevent any damage.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
-rw-r--r--	kernel/sched_clock.c	6
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index b96559cb96a5..4b8474c966dc 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -53,7 +53,6 @@ struct sched_clock_data {
 	raw_spinlock_t		lock;
 
 	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
@@ -84,7 +83,6 @@ void sched_clock_init(void)
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
@@ -105,7 +103,7 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
 	u64 clock = scd->clock;
 	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;
 
 	WARN_ON_ONCE(!irqs_disabled());
 	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
@@ -130,7 +128,6 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(clock < min_clock))
 		clock = min_clock;
 
-	scd->prev_raw = now;
 	scd->tick_jiffies = now_jiffies;
 	scd->clock = clock;
 }
@@ -234,7 +231,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	 * rq clock:
 	 */
 	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
 	scd->clock += delta_ns;
 	__raw_spin_unlock(&scd->lock);
 
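
For reference, a minimal user-space sketch of the clamping idea the changelog relies on: the clock is advanced from the raw timestamp taken at the last tick and then bounded to roughly a one-tick window above the gtod time of that tick, so basing the delta on tick_raw alone cannot wreck the result. The names (clock_state, update_clock), the 1 ms TICK_NSEC value and the exact bounds are illustrative assumptions, simplified compared to the kernel's __update_sched_clock():

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* assumed 1 ms tick for illustration; the real value depends on HZ */

struct clock_state {
	uint64_t tick_raw;	/* raw sched_clock() value sampled at the last tick */
	uint64_t tick_gtod;	/* gtod timestamp sampled at the last tick */
	uint64_t clock;		/* the filtered, monotonic clock handed out */
};

/* Advance the clock from the last tick timestamp, then clamp it. */
static uint64_t update_clock(struct clock_state *s, uint64_t now_raw,
			     unsigned long ticks_since_update)
{
	int64_t delta = (int64_t)(now_raw - s->tick_raw);
	uint64_t clock = s->clock;
	uint64_t min_clock, max_clock;

	if (delta < 0)
		delta = 0;
	clock += (uint64_t)delta;

	/*
	 * Monotonicity/maximum checks: the result must stay within
	 * roughly one tick of the gtod time seen at the last update,
	 * so a bogus raw delta cannot push the clock backwards or
	 * let it run away.
	 */
	min_clock = s->tick_gtod + (uint64_t)ticks_since_update * TICK_NSEC;
	max_clock = min_clock + TICK_NSEC;

	if (clock > max_clock)
		clock = max_clock;
	if (clock < min_clock)
		clock = min_clock;

	s->clock = clock;
	return clock;
}

int main(void)
{
	struct clock_state s = { .tick_raw = 1000, .tick_gtod = 5000000, .clock = 5000000 };

	/* A wild raw delta gets clamped to the one-tick window (prints 6000000). */
	printf("%llu\n", (unsigned long long)update_clock(&s, 900000000ULL, 0));
	return 0;
}

Because the clamp window is anchored at the last tick, dropping prev_raw only changes which raw timestamp the delta is measured from; the min_clock/max_clock bounds still decide what the clock is allowed to return.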