author		Thomas Gleixner <tglx@linutronix.de>	2016-12-25 11:38:40 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2016-12-25 17:21:22 +0100
commit		2456e855354415bfaeb7badaa14e11b3e02c8466 (patch)
tree		6fc81500645174c246c3fdb568cba32aa01960c6 /kernel
parent		a5a1d1c2914b5316924c7893eb683a5420ebd3be (diff)
ktime: Get rid of the union
ktime is a union because the initial implementation stored the time in
scalar nanoseconds on 64 bit machines and in an endianness-optimized
timespec variant on 32 bit machines. The Y2038 cleanup removed the
timespec variant and switched everything to scalar nanoseconds. The
union remained, but became completely pointless.
Get rid of the union and just keep ktime_t as a simple typedef of type s64.
The conversion was done with coccinelle and some manual mopping up.
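For context, the type-level change looks roughly like this (simplified sketch of the ktime_t definition in include/linux/ktime.h, not the verbatim header):

	/* Before: ktime_t is a one-member union, accessed via .tv64 */
	union ktime {
		s64	tv64;
	};
	typedef union ktime ktime_t;

	/* After: ktime_t is a plain scalar, used directly */
	typedef s64 ktime_t;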
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/futex.c | 4
-rw-r--r-- | kernel/signal.c | 6
-rw-r--r-- | kernel/time/alarmtimer.c | 20
-rw-r--r-- | kernel/time/clockevents.c | 6
-rw-r--r-- | kernel/time/hrtimer.c | 52
-rw-r--r-- | kernel/time/itimer.c | 10
-rw-r--r-- | kernel/time/ntp.c | 2
-rw-r--r-- | kernel/time/posix-timers.c | 20
-rw-r--r-- | kernel/time/tick-broadcast-hrtimer.c | 2
-rw-r--r-- | kernel/time/tick-broadcast.c | 24
-rw-r--r-- | kernel/time/tick-oneshot.c | 2
-rw-r--r-- | kernel/time/tick-sched.c | 22
-rw-r--r-- | kernel/time/timekeeping.c | 6
13 files changed, 88 insertions, 88 deletions
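The per-callsite pattern in the hunks below is mechanical: every access through the .tv64 member becomes a direct operation on the scalar value, and initializers like { .tv64 = KTIME_MAX } become plain assignments. A minimal illustration of the pattern (hypothetical variable names, not taken from any particular hunk):

	ktime_t expires, now;

	/* before: the union forces explicit .tv64 access */
	if (expires.tv64 <= now.tv64)
		expires.tv64 = KTIME_MAX;

	/* after: ktime_t is an s64, so ordinary operators apply */
	if (expires <= now)
		expires = KTIME_MAX;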
diff --git a/kernel/futex.c b/kernel/futex.c
index 9246d9f593d1..0842c8ca534b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2459,7 +2459,7 @@ retry:
 	restart->fn = futex_wait_restart;
 	restart->futex.uaddr = uaddr;
 	restart->futex.val = val;
-	restart->futex.time = abs_time->tv64;
+	restart->futex.time = *abs_time;
 	restart->futex.bitset = bitset;
 	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
@@ -2480,7 +2480,7 @@ static long futex_wait_restart(struct restart_block *restart)
 	ktime_t t, *tp = NULL;
 	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
-		t.tv64 = restart->futex.time;
+		t = restart->futex.time;
 		tp = &t;
 	}
 	restart->fn = do_no_restart_syscall;
diff --git a/kernel/signal.c b/kernel/signal.c
index f5d4e275345e..ff046b73ff2d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -587,7 +587,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		struct hrtimer *tmr = &tsk->signal->real_timer;
 		if (!hrtimer_is_queued(tmr) &&
-		    tsk->signal->it_real_incr.tv64 != 0) {
+		    tsk->signal->it_real_incr != 0) {
 			hrtimer_forward(tmr, tmr->base->get_time(),
 					tsk->signal->it_real_incr);
 			hrtimer_restart(tmr);
@@ -2766,7 +2766,7 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 		    const struct timespec *ts)
 {
-	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
+	ktime_t *to = NULL, timeout = KTIME_MAX;
 	struct task_struct *tsk = current;
 	sigset_t mask = *which;
 	int sig, ret = 0;
@@ -2786,7 +2786,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 	spin_lock_irq(&tsk->sighand->siglock);
 	sig = dequeue_signal(tsk, &mask, info);
-	if (!sig && timeout.tv64) {
+	if (!sig && timeout) {
 		/*
 		 * None ready, temporarily unblock those we're interested
 		 * while we are sleeping in so that we'll be awakened when
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 3921cf7fea8e..ab6ac077bdb7 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -254,13 +254,13 @@ static int alarmtimer_suspend(struct device *dev)
 		if (!next)
 			continue;
 		delta = ktime_sub(next->expires, base->gettime());
-		if (!min.tv64 || (delta.tv64 < min.tv64)) {
+		if (!min || (delta < min)) {
 			expires = next->expires;
 			min = delta;
 			type = i;
 		}
 	}
-	if (min.tv64 == 0)
+	if (min == 0)
 		return 0;
 	if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
@@ -328,7 +328,7 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
 	delta = ktime_sub(absexp, base->gettime());
 	spin_lock_irqsave(&freezer_delta_lock, flags);
-	if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) {
+	if (!freezer_delta || (delta < freezer_delta)) {
 		freezer_delta = delta;
 		freezer_expires = absexp;
 		freezer_alarmtype = type;
@@ -453,10 +453,10 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
 	delta = ktime_sub(now, alarm->node.expires);
-	if (delta.tv64 < 0)
+	if (delta < 0)
 		return 0;
-	if (unlikely(delta.tv64 >= interval.tv64)) {
+	if (unlikely(delta >= interval)) {
 		s64 incr = ktime_to_ns(interval);
 		overrun = ktime_divns(delta, incr);
@@ -464,7 +464,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
 		alarm->node.expires = ktime_add_ns(alarm->node.expires,
						   incr*overrun);
-		if (alarm->node.expires.tv64 > now.tv64)
+		if (alarm->node.expires > now)
 			return overrun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -522,7 +522,7 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
 	}
 	/* Re-add periodic timers */
-	if (ptr->it.alarm.interval.tv64) {
+	if (ptr->it.alarm.interval) {
 		ptr->it_overrun += alarm_forward(alarm, now,
						 ptr->it.alarm.interval);
 		result = ALARMTIMER_RESTART;
@@ -730,7 +730,7 @@ static int update_rmtp(ktime_t exp, enum alarmtimer_type type,
 	rem = ktime_sub(exp, alarm_bases[type].gettime());
-	if (rem.tv64 <= 0)
+	if (rem <= 0)
 		return 0;
 	rmt = ktime_to_timespec(rem);
@@ -755,7 +755,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
 	struct alarm alarm;
 	int ret = 0;
-	exp.tv64 = restart->nanosleep.expires;
+	exp = restart->nanosleep.expires;
 	alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
 	if (alarmtimer_do_nsleep(&alarm, exp))
@@ -835,7 +835,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
 	restart = &current->restart_block;
 	restart->fn = alarm_timer_nsleep_restart;
 	restart->nanosleep.clockid = type;
-	restart->nanosleep.expires = exp.tv64;
+	restart->nanosleep.expires = exp;
 	restart->nanosleep.rmtp = rmtp;
 	ret = -ERESTART_RESTARTBLOCK;
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2c5bc77c0bb0..97ac0951f164 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -179,7 +179,7 @@ void clockevents_switch_state(struct clock_event_device *dev,
 void clockevents_shutdown(struct clock_event_device *dev)
 {
 	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 }
 /**
@@ -213,7 +213,7 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
 	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
 		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
-		dev->next_event.tv64 = KTIME_MAX;
+		dev->next_event = KTIME_MAX;
 		return -ETIME;
 	}
@@ -310,7 +310,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 	int64_t delta;
 	int rc;
-	if (unlikely(expires.tv64 < 0)) {
+	if (unlikely(expires < 0)) {
 		WARN_ON_ONCE(1);
 		return -ETIME;
 	}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 161e340395d5..c7f780113884 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -171,7 +171,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 		return 0;
 	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
-	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
+	return expires <= new_base->cpu_base->expires_next;
 #else
 	return 0;
 #endif
@@ -313,7 +313,7 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
 	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
 	 * return to user space in a timespec:
 	 */
-	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
+	if (res < 0 || res < lhs || res < rhs)
 		res = ktime_set(KTIME_SEC_MAX, 0);
 	return res;
@@ -465,8 +465,8 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 {
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
 	unsigned int active = cpu_base->active_bases;
+	ktime_t expires, expires_next = KTIME_MAX;
 	hrtimer_update_next_timer(cpu_base, NULL);
 	for (; active; base++, active >>= 1) {
@@ -479,7 +479,7 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 		next = timerqueue_getnext(&base->active);
 		timer = container_of(next, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-		if (expires.tv64 < expires_next.tv64) {
+		if (expires < expires_next) {
 			expires_next = expires;
 			hrtimer_update_next_timer(cpu_base, timer);
 		}
@@ -489,8 +489,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 	 * the clock bases so the result might be negative. Fix it up
 	 * to prevent a false positive in clockevents_program_event().
 	 */
-	if (expires_next.tv64 < 0)
-		expires_next.tv64 = 0;
+	if (expires_next < 0)
+		expires_next = 0;
 	return expires_next;
 }
 #endif
@@ -561,10 +561,10 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 	expires_next = __hrtimer_get_next_event(cpu_base);
-	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
-	cpu_base->expires_next.tv64 = expires_next.tv64;
+	cpu_base->expires_next = expires_next;
 	/*
 	 * If a hang was detected in the last timer interrupt then we
@@ -622,10 +622,10 @@ static void hrtimer_reprogram(struct hrtimer *timer,
 	 * CLOCK_REALTIME timer might be requested with an absolute
 	 * expiry time which is less than base->offset. Set it to 0.
 	 */
-	if (expires.tv64 < 0)
-		expires.tv64 = 0;
+	if (expires < 0)
+		expires = 0;
-	if (expires.tv64 >= cpu_base->expires_next.tv64)
+	if (expires >= cpu_base->expires_next)
 		return;
 	/* Update the pointer to the next expiring timer */
@@ -653,7 +653,7 @@ static void hrtimer_reprogram(struct hrtimer *timer,
  */
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 {
-	base->expires_next.tv64 = KTIME_MAX;
+	base->expires_next = KTIME_MAX;
 	base->hres_active = 0;
 }
@@ -827,21 +827,21 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 	delta = ktime_sub(now, hrtimer_get_expires(timer));
-	if (delta.tv64 < 0)
+	if (delta < 0)
 		return 0;
 	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
 		return 0;
-	if (interval.tv64 < hrtimer_resolution)
-		interval.tv64 = hrtimer_resolution;
+	if (interval < hrtimer_resolution)
+		interval = hrtimer_resolution;
-	if (unlikely(delta.tv64 >= interval.tv64)) {
+	if (unlikely(delta >= interval)) {
 		s64 incr = ktime_to_ns(interval);
 		orun = ktime_divns(delta, incr);
 		hrtimer_add_expires_ns(timer, incr * orun);
-		if (hrtimer_get_expires_tv64(timer) > now.tv64)
+		if (hrtimer_get_expires_tv64(timer) > now)
 			return orun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -1104,7 +1104,7 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base).tv64;
+		expires = __hrtimer_get_next_event(cpu_base);
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -1296,7 +1296,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 			 * are right-of a not yet expired timer, because that
 			 * timer will have to trigger a wakeup anyway.
 			 */
-			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
+			if (basenow < hrtimer_get_softexpires_tv64(timer))
 				break;
 			__run_hrtimer(cpu_base, base, timer, &basenow);
@@ -1318,7 +1318,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 	raw_spin_lock(&cpu_base->lock);
 	entry_time = now = hrtimer_update_base(cpu_base);
@@ -1331,7 +1331,7 @@ retry:
 	 * timers which run their callback and need to be requeued on
 	 * this CPU.
 	 */
-	cpu_base->expires_next.tv64 = KTIME_MAX;
+	cpu_base->expires_next = KTIME_MAX;
 	__hrtimer_run_queues(cpu_base, now);
@@ -1379,13 +1379,13 @@ retry:
 	cpu_base->hang_detected = 1;
 	raw_spin_unlock(&cpu_base->lock);
 	delta = ktime_sub(now, entry_time);
-	if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
-		cpu_base->max_hang_time = (unsigned int) delta.tv64;
+	if ((unsigned int)delta > cpu_base->max_hang_time)
+		cpu_base->max_hang_time = (unsigned int) delta;
 	/*
 	 * Limit it to a sensible value as we enforce a longer
 	 * delay. Give the CPU at least 100ms to catch up.
 	 */
-	if (delta.tv64 > 100 * NSEC_PER_MSEC)
+	if (delta > 100 * NSEC_PER_MSEC)
 		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
 	else
 		expires_next = ktime_add(now, delta);
@@ -1495,7 +1495,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
 	ktime_t rem;
 	rem = hrtimer_expires_remaining(timer);
-	if (rem.tv64 <= 0)
+	if (rem <= 0)
 		return 0;
 	rmt = ktime_to_timespec(rem);
@@ -1693,7 +1693,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 	 * Optimize when a zero timeout value is given. It does not
 	 * matter whether this is an absolute or a relative time.
 	 */
-	if (expires && !expires->tv64) {
+	if (expires && *expires == 0) {
 		__set_current_state(TASK_RUNNING);
 		return 0;
 	}
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index a45afb7277c2..8c89143f9ebf 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -34,10 +34,10 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
 	 * then we return 0 - which is correct.
 	 */
 	if (hrtimer_active(timer)) {
-		if (rem.tv64 <= 0)
-			rem.tv64 = NSEC_PER_USEC;
+		if (rem <= 0)
+			rem = NSEC_PER_USEC;
 	} else
-		rem.tv64 = 0;
+		rem = 0;
 	return ktime_to_timeval(rem);
 }
@@ -216,12 +216,12 @@ again:
 		goto again;
 	}
 	expires = timeval_to_ktime(value->it_value);
-	if (expires.tv64 != 0) {
+	if (expires != 0) {
 		tsk->signal->it_real_incr =
			timeval_to_ktime(value->it_interval);
 		hrtimer_start(timer, expires, HRTIMER_MODE_REL);
 	} else
-		tsk->signal->it_real_incr.tv64 = 0;
+		tsk->signal->it_real_incr = 0;
 	trace_itimer_state(ITIMER_REAL, value, 0);
 	spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6df8927c58a5..edf19cc53140 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -381,7 +381,7 @@ ktime_t ntp_get_next_leap(void)
 	if ((time_state == TIME_INS) && (time_status & STA_INS))
 		return ktime_set(ntp_next_leap_sec, 0);
-	ret.tv64 = KTIME_MAX;
+	ret = KTIME_MAX;
 	return ret;
 }
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42d7b9558741..9fe98b3777a2 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -359,7 +359,7 @@ static void schedule_next_timer(struct k_itimer *timr)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
-	if (timr->it.real.interval.tv64 == 0)
+	if (timr->it.real.interval == 0)
 		return;
 	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
@@ -449,7 +449,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 	timr = container_of(timer, struct k_itimer, it.real.timer);
 	spin_lock_irqsave(&timr->it_lock, flags);
-	if (timr->it.real.interval.tv64 != 0)
+	if (timr->it.real.interval != 0)
 		si_private = ++timr->it_requeue_pending;
 	if (posix_timer_event(timr, si_private)) {
@@ -458,7 +458,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 		 * we will not get a call back to restart it AND
 		 * it should be restarted.
 		 */
-		if (timr->it.real.interval.tv64 != 0) {
+		if (timr->it.real.interval != 0) {
 			ktime_t now = hrtimer_cb_get_time(timer);
 			/*
@@ -487,7 +487,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
-				if (timr->it.real.interval.tv64 < kj.tv64)
+				if (timr->it.real.interval < kj)
					now = ktime_add(now, kj);
			}
 #endif
@@ -743,7 +743,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 	iv = timr->it.real.interval;
 	/* interval timer ? */
-	if (iv.tv64)
+	if (iv)
 		cur_setting->it_interval = ktime_to_timespec(iv);
 	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
@@ -756,13 +756,13 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 	 * timer move the expiry time forward by intervals, so
 	 * expiry is > now.
 	 */
-	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
-			(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
+	if (iv && (timr->it_requeue_pending & REQUEUE_PENDING ||
+		   (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
 		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 	remaining = __hrtimer_expires_remaining_adjusted(timer, now);
 	/* Return 0 only, when the timer is expired and not pending */
-	if (remaining.tv64 <= 0) {
+	if (remaining <= 0) {
 		/*
 		 * A single shot SIGEV_NONE timer must return 0, when
 		 * it is expired !
@@ -839,7 +839,7 @@ common_timer_set(struct k_itimer *timr, int flags,
 	common_timer_get(timr, old_setting);
 	/* disable the timer */
-	timr->it.real.interval.tv64 = 0;
+	timr->it.real.interval = 0;
 	/*
 	 * careful here. If smp we could be in the "fire" routine which will
 	 * be spinning as we hold the lock. But this is ONLY an SMP issue.
@@ -924,7 +924,7 @@ retry:
 static int common_timer_del(struct k_itimer *timer)
 {
-	timer->it.real.interval.tv64 = 0;
+	timer->it.real.interval = 0;
 	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
 		return TIMER_RETRY;
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 690b797f522e..a7bb8f33ae07 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -97,7 +97,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 	if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-		if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
+		if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
 			return HRTIMER_RESTART;
 	return HRTIMER_NORESTART;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e83ebae..3109204c87cc 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -604,14 +604,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 	bool bc_local;
 	raw_spin_lock(&tick_broadcast_lock);
-	dev->next_event.tv64 = KTIME_MAX;
-	next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
+	next_event = KTIME_MAX;
 	cpumask_clear(tmpmask);
 	now = ktime_get();
 	/* Find all expired events */
 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
-		if (td->evtdev->next_event.tv64 <= now.tv64) {
+		if (td->evtdev->next_event <= now) {
 			cpumask_set_cpu(cpu, tmpmask);
 			/*
 			 * Mark the remote cpu in the pending mask, so
@@ -619,8 +619,8 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 			 * timer in tick_broadcast_oneshot_control().
 			 */
 			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
-		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
-			next_event.tv64 = td->evtdev->next_event.tv64;
+		} else if (td->evtdev->next_event < next_event) {
+			next_event = td->evtdev->next_event;
 			next_cpu = cpu;
 		}
 	}
@@ -657,7 +657,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 	 * - There are pending events on sleeping CPUs which were not
 	 *   in the event mask
 	 */
-	if (next_event.tv64 != KTIME_MAX)
+	if (next_event != KTIME_MAX)
 		tick_broadcast_set_event(dev, next_cpu, next_event);
 	raw_spin_unlock(&tick_broadcast_lock);
@@ -672,7 +672,7 @@ static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
 {
 	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
 		return 0;
-	if (bc->next_event.tv64 == KTIME_MAX)
+	if (bc->next_event == KTIME_MAX)
 		return 0;
 	return bc->bound_on == cpu ? -EBUSY : 0;
 }
@@ -688,7 +688,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
 	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
 		if (broadcast_needs_cpu(bc, smp_processor_id()))
 			return;
-		if (dev->next_event.tv64 < bc->next_event.tv64)
+		if (dev->next_event < bc->next_event)
 			return;
 	}
 	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
@@ -754,7 +754,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 		 */
 		if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
 			ret = -EBUSY;
-		} else if (dev->next_event.tv64 < bc->next_event.tv64) {
+		} else if (dev->next_event < bc->next_event) {
 			tick_broadcast_set_event(bc, cpu, dev->next_event);
 			/*
 			 * In case of hrtimer broadcasts the
@@ -789,7 +789,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 		/*
 		 * Bail out if there is no next event.
 		 */
-		if (dev->next_event.tv64 == KTIME_MAX)
+		if (dev->next_event == KTIME_MAX)
 			goto out;
 		/*
 		 * If the pending bit is not set, then we are
@@ -824,7 +824,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 		 * nohz fixups.
 		 */
 		now = ktime_get();
-		if (dev->next_event.tv64 <= now.tv64) {
+		if (dev->next_event <= now) {
 			cpumask_set_cpu(cpu, tick_broadcast_force_mask);
 			goto out;
 		}
@@ -897,7 +897,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
					   tick_next_period);
 			tick_broadcast_set_event(bc, cpu, tick_next_period);
 		} else
-			bc->next_event.tv64 = KTIME_MAX;
+			bc->next_event = KTIME_MAX;
 	} else {
 		/*
 		 * The first cpu which switches to oneshot mode sets
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index b51344652330..6b009c207671 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -28,7 +28,7 @@ int tick_program_event(ktime_t expires, int force)
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
-	if (unlikely(expires.tv64 == KTIME_MAX)) {
+	if (unlikely(expires == KTIME_MAX)) {
 		/*
 		 * We don't need the clock event device any more, stop it.
 		 */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 71496a20e670..2c115fdab397 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -58,21 +58,21 @@ static void tick_do_update_jiffies64(ktime_t now)
 	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
-	if (delta.tv64 < tick_period.tv64)
+	if (delta < tick_period)
 		return;
 	/* Reevaluate with jiffies_lock held */
 	write_seqlock(&jiffies_lock);
 	delta = ktime_sub(now, last_jiffies_update);
-	if (delta.tv64 >= tick_period.tv64) {
+	if (delta >= tick_period) {
 		delta = ktime_sub(delta, tick_period);
 		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);
 		/* Slow path for long timeouts */
-		if (unlikely(delta.tv64 >= tick_period.tv64)) {
+		if (unlikely(delta >= tick_period)) {
 			s64 incr = ktime_to_ns(tick_period);
 			ticks = ktime_divns(delta, incr);
@@ -101,7 +101,7 @@ static ktime_t tick_init_jiffy_update(void)
 	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
-	if (last_jiffies_update.tv64 == 0)
+	if (last_jiffies_update == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
 	write_sequnlock(&jiffies_lock);
@@ -669,7 +669,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
-		basemono = last_jiffies_update.tv64;
+		basemono = last_jiffies_update;
 		basejiff = jiffies;
 	} while (read_seqretry(&jiffies_lock, seq));
 	ts->last_jiffies = basejiff;
@@ -697,7 +697,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	 */
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
-		tick.tv64 = 0;
+		tick = 0;
 		/*
 		 * Tell the timer code that the base is not idle, i.e. undo
@@ -764,10 +764,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		expires = KTIME_MAX;
 	expires = min_t(u64, expires, next_tick);
-	tick.tv64 = expires;
+	tick = expires;
 	/* Skip reprogram of event if its not changed */
-	if (ts->tick_stopped && (expires == dev->next_event.tv64))
+	if (ts->tick_stopped && (expires == dev->next_event))
 		goto out;
 	/*
@@ -864,7 +864,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 	}
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
-		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
+		ts->sleep_length = NSEC_PER_SEC / HZ;
 		return false;
 	}
@@ -914,7 +914,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	ts->idle_calls++;
 	expires = tick_nohz_stop_sched_tick(ts, now, cpu);
-	if (expires.tv64 > 0LL) {
+	if (expires > 0LL) {
 		ts->idle_sleeps++;
 		ts->idle_expires = expires;
 	}
@@ -1051,7 +1051,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 	tick_sched_do_timer(now);
 	tick_sched_handle(ts, regs);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f4152a69277f..db087d7e106d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -104,7 +104,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 	 */
 	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
-	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
+	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
 	tk->wall_to_monotonic = wtm;
 	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 	tk->offs_real = timespec64_to_ktime(tmp);
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 static inline void tk_update_leap_state(struct timekeeper *tk)
 {
 	tk->next_leap_ktime = ntp_get_next_leap();
-	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
+	if (tk->next_leap_ktime != KTIME_MAX)
 		/* Convert to monotonic time */
 		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
 }
@@ -2250,7 +2250,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 		}
 		/* Handle leapsecond insertion adjustments */
-		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
+		if (unlikely(base >= tk->next_leap_ktime))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
 	} while (read_seqcount_retry(&tk_core.seq, seq));