author	Arnd Bergmann <arnd@arndb.de>	2018-02-06 15:40:21 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-06 18:32:46 -0800
commit	28f3a488ed83ac4a01406490941a6486806d1333 (patch)
tree	7f6df93c01d2c71460d3d4ac55af6f0913c53918 /drivers/pps/generators
parent	2ee0826085d1c0281cb60c1f4bc3e0c27efeedc3 (diff)
pps: parport: use timespec64 instead of timespec
getnstimeofday() is deprecated, so I'm converting this to use ktime_get_real_ts64() as a safe replacement. I considered using ktime_get_real() instead, but since the algorithm here depends on the exact timing, I decided to introduce fewer changes and leave the code that determines the nanoseconds since the last seconds wrap untouched.

It's not entirely clear to me whether we should also change the time base to CLOCK_BOOTTIME or CLOCK_TAI. With boottime, we would be independent of changes due to settimeofday() and only see the speed adjustment from the upstream clock source, with the downside of having the signal be at an arbitrary offset from the start of the UTC second signal. With CLOCK_TAI, we would use the same offset from the UTC second as before and still suffer from settimeofday() adjustments, but would be less confused during leap seconds. Both boottime and tai only offer usable (i.e. avoiding ktime_t to timespec64 conversion) interfaces for ktime_t though, so either way, changing it wouldn't take significantly more work. CLOCK_MONOTONIC could be used with ktime_get_ts64(), but would lose synchronization across a suspend/resume cycle, which seems worse.

Link: http://lkml.kernel.org/r/20180116171451.3095620-1-arnd@arndb.de
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Rodolfo Giometti <giometti@enneenne.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
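As a rough illustration of the conversion pattern the patch applies (this is not code from the patch; the function name and the timed section are made up for the example), the timespec/getnstimeofday() pair becomes timespec64/ktime_get_real_ts64(), tv_sec widens to 64 bits even on 32-bit architectures (hence the %lld format with an s64 cast), and the timespec helpers gain a 64 suffix:

#include <linux/time64.h>
#include <linux/timekeeping.h>
#include <linux/printk.h>

/* Hypothetical helper showing the timespec64 idiom used by the patch. */
static void example_port_write_cost(void)
{
	struct timespec64 a, b, d;

	ktime_get_real_ts64(&a);	/* was: getnstimeofday(&a) */
	/* ... the operation being timed ... */
	ktime_get_real_ts64(&b);

	d = timespec64_sub(b, a);	/* was: timespec_sub() */
	pr_info("took %lld.%09ld s (%lld ns)\n",
		(s64)d.tv_sec, d.tv_nsec, timespec64_to_ns(&d));
}

The diff below applies the same mechanical rename at each call site, plus the matching printk format change.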
Diffstat (limited to 'drivers/pps/generators')
-rw-r--r--	drivers/pps/generators/pps_gen_parport.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index dcd39fba6ddd..51cfde6afffd 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -70,7 +70,7 @@ static long hrtimer_error = SAFETY_INTERVAL;
/* the kernel hrtimer event */
static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
{
- struct timespec expire_time, ts1, ts2, ts3, dts;
+ struct timespec64 expire_time, ts1, ts2, ts3, dts;
struct pps_generator_pp *dev;
struct parport *port;
long lim, delta;
@@ -78,7 +78,7 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
/* We have to disable interrupts here. The idea is to prevent
* other interrupts on the same processor to introduce random
- * lags while polling the clock. getnstimeofday() takes <1us on
+ * lags while polling the clock. ktime_get_real_ts64() takes <1us on
* most machines while other interrupt handlers can take much
* more potentially.
*
@@ -88,22 +88,22 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
local_irq_save(flags);
/* first of all we get the time stamp... */
- getnstimeofday(&ts1);
- expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer));
+ ktime_get_real_ts64(&ts1);
+ expire_time = ktime_to_timespec64(hrtimer_get_softexpires(timer));
dev = container_of(timer, struct pps_generator_pp, timer);
lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
/* check if we are late */
if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
local_irq_restore(flags);
- pr_err("we are late this time %ld.%09ld\n",
- ts1.tv_sec, ts1.tv_nsec);
+ pr_err("we are late this time %lld.%09ld\n",
+ (s64)ts1.tv_sec, ts1.tv_nsec);
goto done;
}
/* busy loop until the time is right for an assert edge */
do {
- getnstimeofday(&ts2);
+ ktime_get_real_ts64(&ts2);
} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
/* set the signal */
@@ -113,25 +113,25 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
/* busy loop until the time is right for a clear edge */
lim = NSEC_PER_SEC - dev->port_write_time;
do {
- getnstimeofday(&ts2);
+ ktime_get_real_ts64(&ts2);
} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
/* unset the signal */
port->ops->write_control(port, NO_SIGNAL);
- getnstimeofday(&ts3);
+ ktime_get_real_ts64(&ts3);
local_irq_restore(flags);
/* update calibrated port write time */
- dts = timespec_sub(ts3, ts2);
+ dts = timespec64_sub(ts3, ts2);
dev->port_write_time =
- (dev->port_write_time + timespec_to_ns(&dts)) >> 1;
+ (dev->port_write_time + timespec64_to_ns(&dts)) >> 1;
done:
/* update calibrated hrtimer error */
- dts = timespec_sub(ts1, expire_time);
- delta = timespec_to_ns(&dts);
+ dts = timespec64_sub(ts1, expire_time);
+ delta = timespec64_to_ns(&dts);
/* If the new error value is bigger then the old, use the new
* value, if not then slowly move towards the new value. This
* way it should be safe in bad conditions and efficient in
@@ -161,17 +161,17 @@ static void calibrate_port(struct pps_generator_pp *dev)
long acc = 0;
for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
- struct timespec a, b;
+ struct timespec64 a, b;
unsigned long irq_flags;
local_irq_save(irq_flags);
- getnstimeofday(&a);
+ ktime_get_real_ts64(&a);
port->ops->write_control(port, NO_SIGNAL);
- getnstimeofday(&b);
+ ktime_get_real_ts64(&b);
local_irq_restore(irq_flags);
- b = timespec_sub(b, a);
- acc += timespec_to_ns(&b);
+ b = timespec64_sub(b, a);
+ acc += timespec64_to_ns(&b);
}
dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
@@ -180,9 +180,9 @@ static void calibrate_port(struct pps_generator_pp *dev)
static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
{
- struct timespec ts;
+ struct timespec64 ts;
- getnstimeofday(&ts);
+ ktime_get_real_ts64(&ts);
return ktime_set(ts.tv_sec +
((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),