author	Valentin Schneider <valentin.schneider@arm.com>	2021-11-12 18:52:01 +0000
committer	Peter Zijlstra <peterz@infradead.org>	2022-04-05 10:24:42 +0200
commit	cfe43f478b79ba45573ca22d52d0d8823be068fa (patch)
tree	57e4b23a2770f3889fa8f58ff7296d1bc360b88a
parent	3123109284176b1532874591f7c81f3837bbdc17 (diff)
preempt/dynamic: Introduce preemption model accessors
CONFIG_PREEMPT{_NONE, _VOLUNTARY} designate either:

o The build-time preemption model when !PREEMPT_DYNAMIC
o The default boot-time preemption model when PREEMPT_DYNAMIC

IOW, using those on PREEMPT_DYNAMIC kernels is meaningless - the actual
model could have been set to something else by the "preempt=foo" cmdline
parameter. Same problem applies to CONFIG_PREEMPTION.

Introduce a set of helpers to determine the actual preemption model used
by the live kernel.

Suggested-by: Marco Elver <elver@google.com>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Marco Elver <elver@google.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20211112185203.280040-3-valentin.schneider@arm.com
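For context: with these accessors, code that cares about the live preemption
model can query it directly instead of testing the build-time Kconfig symbols.
A minimal, hypothetical usage sketch (the helper below is illustrative and not
part of this patch):

	#include <linux/sched.h>
	#include <linux/printk.h>

	/* Hypothetical diagnostic helper built on the new accessors. */
	static void report_preempt_model(void)
	{
		if (preempt_model_rt())
			pr_info("preemption model: rt\n");
		else if (preempt_model_full())
			pr_info("preemption model: full\n");
		else if (preempt_model_voluntary())
			pr_info("preemption model: voluntary\n");
		else if (preempt_model_none())
			pr_info("preemption model: none\n");
	}

On a PREEMPT_DYNAMIC kernel this reports whatever "preempt=" selected at boot;
on a static kernel the accessors reduce to the IS_ENABLED() checks added below.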
-rw-r--r--	include/linux/sched.h	41
-rw-r--r--	kernel/sched/core.c	12
2 files changed, 53 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5e3c00b74e1..67f06f72c50e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2117,6 +2117,47 @@ static inline void cond_resched_rcu(void)
 #endif
 }
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+	return preempt_model_full() || preempt_model_rt();
+}
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
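The comment above preempt_model_preemptible() is the heart of the patch: a
PREEMPT_DYNAMIC kernel is always *built* with CONFIG_PREEMPTION=y, so the two
checks below can disagree at runtime. A hypothetical illustration, assuming a
PREEMPT_DYNAMIC=y build booted with "preempt=none":

	IS_ENABLED(CONFIG_PREEMPTION);	/* true: a build-time property of the image */
	preempt_model_preemptible();	/* false: the live model is "none" */

This is why callers that care about the actual running behaviour should prefer
the accessor over the Kconfig check.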
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d575b4914925..068c088e9584 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8409,6 +8409,18 @@ static void __init preempt_dynamic_init(void)
 	}
 }
 
+#define PREEMPT_MODEL_ACCESSOR(mode) \
+	bool preempt_model_##mode(void)						 \
+	{									 \
+		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
+		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
+	}									 \
+	EXPORT_SYMBOL_GPL(preempt_model_##mode)
+
+PREEMPT_MODEL_ACCESSOR(none);
+PREEMPT_MODEL_ACCESSOR(voluntary);
+PREEMPT_MODEL_ACCESSOR(full);
+
 #else /* !CONFIG_PREEMPT_DYNAMIC */
 static inline void preempt_dynamic_init(void) { }
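For reference, PREEMPT_MODEL_ACCESSOR(none) expands to roughly the following
(whitespace rearranged):

	bool preempt_model_none(void)
	{
		/* Warn once if queried before the boot-time model is resolved. */
		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
		return preempt_dynamic_mode == preempt_dynamic_none;
	}
	EXPORT_SYMBOL_GPL(preempt_model_none);

Each generated accessor compares the boot-time model recorded in
preempt_dynamic_mode against its own enumerator, warning once if it is called
before preempt_dynamic_init() has resolved the model.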