Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/clocksource.c    | 35
-rw-r--r--  kernel/time/tick-broadcast.c |  8
-rw-r--r--  kernel/time/tick-common.c    |  3
-rw-r--r--  kernel/time/timer.c          | 14
-rw-r--r--  kernel/time/timer_list.c     | 18
5 files changed, 31 insertions, 47 deletions
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 84f37420fcf5..f89a78e2792b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -129,31 +129,19 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
spin_unlock_irqrestore(&watchdog_lock, *flags);
}
-static int clocksource_watchdog_kthread(void *data);
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
/*
* Interval: 0.5sec Threshold: 0.0625s
*/
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
-static void clocksource_watchdog_work(struct work_struct *work)
-{
- /*
- * If kthread_run fails the next watchdog scan over the
- * watchdog_list will find the unstable clock again.
- */
- kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
-}
-
static void __clocksource_unstable(struct clocksource *cs)
{
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs->flags |= CLOCK_SOURCE_UNSTABLE;
/*
- * If the clocksource is registered clocksource_watchdog_kthread() will
+ * If the clocksource is registered clocksource_watchdog_work() will
* re-rate and re-select.
*/
if (list_empty(&cs->list)) {
@@ -164,7 +152,7 @@ static void __clocksource_unstable(struct clocksource *cs)
if (cs->mark_unstable)
cs->mark_unstable(cs);
- /* kick clocksource_watchdog_kthread() */
+ /* kick clocksource_watchdog_work() */
if (finished_booting)
schedule_work(&watchdog_work);
}
@@ -174,7 +162,7 @@ static void __clocksource_unstable(struct clocksource *cs)
* @cs: clocksource to be marked unstable
*
* This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a kthread.
+ * it defers demotion and re-selection to a work.
*/
void clocksource_mark_unstable(struct clocksource *cs)
{
@@ -399,7 +387,9 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
}
}
-static int __clocksource_watchdog_kthread(void)
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
+static int __clocksource_watchdog_work(void)
{
struct clocksource *cs, *tmp;
unsigned long flags;
@@ -424,13 +414,12 @@ static int __clocksource_watchdog_kthread(void)
return select;
}
-static int clocksource_watchdog_kthread(void *data)
+static void clocksource_watchdog_work(struct work_struct *work)
{
mutex_lock(&clocksource_mutex);
- if (__clocksource_watchdog_kthread())
+ if (__clocksource_watchdog_work())
clocksource_select();
mutex_unlock(&clocksource_mutex);
- return 0;
}
static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -449,12 +438,12 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_kthread(void) { return 0; }
+static inline int __clocksource_watchdog_work(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }
-static void inline clocksource_watchdog_lock(unsigned long *flags) { }
-static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+static inline void clocksource_watchdog_lock(unsigned long *flags) { }
+static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
@@ -683,7 +672,7 @@ static int __init clocksource_done_booting(void)
/*
* Run the watchdog first to eliminate unstable clock sources
*/
- __clocksource_watchdog_kthread();
+ __clocksource_watchdog_work();
clocksource_select();
mutex_unlock(&clocksource_mutex);
return 0;
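
Net effect of the clocksource.c hunks: the unstable path still queues watchdog_work, but the work handler now takes clocksource_mutex and re-rates/re-selects directly instead of spawning a transient kthread. A minimal sketch of that work-deferral pattern, as a standalone illustration (hypothetical names, not the in-tree code):

/* Illustrative sketch: the event path queues a pre-declared work item,
 * and the handler does the demotion/re-selection under the mutex
 * itself, with no intermediate kthread. */
#include <linux/workqueue.h>
#include <linux/mutex.h>

static void watchdog_work_fn(struct work_struct *work);
static DECLARE_WORK(watchdog_work, watchdog_work_fn);
static DEFINE_MUTEX(clocksource_mutex);

static void watchdog_work_fn(struct work_struct *work)
{
	mutex_lock(&clocksource_mutex);
	/* demote unstable clocksources and pick a new best one here */
	mutex_unlock(&clocksource_mutex);
}

static void on_clocksource_unstable(void)
{
	/* schedule_work() is safe from the atomic paths that flag
	 * a clocksource unstable */
	schedule_work(&watchdog_work);
}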
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index b398c2ea69b2..aa2094d5dd27 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
now = ktime_get();
/* Find all expired events */
for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
+ /*
+ * Required for !SMP because for_each_cpu() reports
+ * unconditionally CPU0 as set on UP kernels.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) &&
+ cpumask_empty(tick_broadcast_oneshot_mask))
+ break;
+
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event <= now) {
cpumask_set_cpu(cpu, tmpmask);
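
The guard added above works around a for_each_cpu() quirk on uniprocessor builds, where the iterator reports CPU0 as set regardless of the mask contents. A standalone sketch of the pitfall and the check (hypothetical function and mask, not the broadcast code):

/* On !CONFIG_SMP kernels for_each_cpu() always yields CPU0, so an
 * explicit empty-mask check is needed before acting on the reported
 * CPU (illustrative sketch). */
#include <linux/cpumask.h>

static void handle_expired(struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (!IS_ENABLED(CONFIG_SMP) && cpumask_empty(mask))
			break;	/* UP: CPU0 was reported spuriously */
		/* ... per-CPU expiry handling would go here ... */
	}
}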
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 49edc1c4f3e6..78e598334007 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -277,7 +277,8 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
*/
return !curdev ||
newdev->rating > curdev->rating ||
- !cpumask_equal(curdev->cpumask, newdev->cpumask);
+ (!cpumask_equal(curdev->cpumask, newdev->cpumask) &&
+ !tick_check_percpu(curdev, newdev, smp_processor_id()));
}
/*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 4a4fd567fb26..cc2d23e6ff61 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1251,18 +1251,18 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
*
* Note: For !irqsafe timers, you must not hold locks that are held in
* interrupt context while calling this function. Even if the lock has
- * nothing to do with the timer in question. Here's why:
+ * nothing to do with the timer in question. Here's why::
*
* CPU0 CPU1
* ---- ----
- * <SOFTIRQ>
- * call_timer_fn();
- * base->running_timer = mytimer;
- * spin_lock_irq(somelock);
+ * <SOFTIRQ>
+ * call_timer_fn();
+ * base->running_timer = mytimer;
+ * spin_lock_irq(somelock);
* <IRQ>
* spin_lock(somelock);
- * del_timer_sync(mytimer);
- * while (base->running_timer == mytimer);
+ * del_timer_sync(mytimer);
+ * while (base->running_timer == mytimer);
*
* Now del_timer_sync() will never return and never release somelock.
* The interrupt on the other CPU is waiting to grab somelock but
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 0ed768b56c60..d647dabdac97 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -28,8 +28,6 @@ struct timer_list_iter {
u64 now;
};
-typedef void (*print_fn_t)(struct seq_file *m, unsigned int *classes);
-
/*
* This allows printing both to /proc/timer_list and
* to the console (on SysRq-Q):
@@ -372,24 +370,12 @@ static const struct seq_operations timer_list_sops = {
.show = timer_list_show,
};
-static int timer_list_open(struct inode *inode, struct file *filp)
-{
- return seq_open_private(filp, &timer_list_sops,
- sizeof(struct timer_list_iter));
-}
-
-static const struct file_operations timer_list_fops = {
- .open = timer_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
-};
-
static int __init init_timer_list_procfs(void)
{
struct proc_dir_entry *pe;
- pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
+ pe = proc_create_seq_private("timer_list", 0400, NULL, &timer_list_sops,
+ sizeof(struct timer_list_iter), NULL);
if (!pe)
return -ENOMEM;
return 0;
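
The timer_list.c change drops the hand-rolled open handler and file_operations in favour of proc_create_seq_private(), which allocates the per-open iterator state and wires up the standard seq_file open/read/llseek/release handlers itself. A minimal sketch of that pattern under hypothetical names (not the timer_list code):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Per-open iterator state; proc_create_seq_private() allocates one
 * instance of this size for every open of the file. */
struct example_iter {
	u64 now;
};

static const struct seq_operations example_sops = {
	/* .start, .next, .stop and .show callbacks as usual */
};

static int __init example_proc_init(void)
{
	if (!proc_create_seq_private("example", 0400, NULL, &example_sops,
				     sizeof(struct example_iter), NULL))
		return -ENOMEM;
	return 0;
}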