author    Peter Zijlstra <peterz@infradead.org>    2021-03-24 11:43:21 +0100
committer Peter Zijlstra <peterz@infradead.org>    2021-04-16 17:06:34 +0200
commit    8a99b6833c884fa0e7919030d93fecedc69fc625 (patch)
tree      c32fe9bce39f0fd380b7da2d51c016663abca632 /kernel/sched
parent    d86ba831656611872e4939b895503ddac63d8196 (diff)
sched: Move SCHED_DEBUG sysctl to debugfs
Stop polluting sysctl with undocumented knobs that really are debug
only, move them all to /debug/sched/ along with the existing
/debug/sched_* files that already exist.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210412102001.287610138@infradead.org
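For readers unfamiliar with the debugfs API this patch leans on, below is a minimal, self-contained sketch of the same creation pattern, reduced to a standalone module. The directory name "example", the file name "knob", and the backing variable are hypothetical and not part of this commit; the real patch creates the "sched" directory once in sched_init_debug() and attaches latency_ns, min_granularity_ns, and the other knobs to it.

/*
 * Sketch only (not part of this patch): expose a single u32 knob
 * under a private debugfs directory, the way sched_init_debug()
 * does for the scheduler tunables. Requires CONFIG_DEBUG_FS.
 */
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/module.h>

static u32 example_knob = 42;          /* value exposed read/write */
static struct dentry *example_dir;     /* /sys/kernel/debug/example */

static int __init example_debugfs_init(void)
{
	/* Create the directory; NULL parent means the debugfs root. */
	example_dir = debugfs_create_dir("example", NULL);

	/* Creates /sys/kernel/debug/example/knob as a 0644 u32 file. */
	debugfs_create_u32("knob", 0644, example_dir, &example_knob);
	return 0;
}

static void __exit example_debugfs_exit(void)
{
	/* Tear down the directory and everything created under it. */
	debugfs_remove_recursive(example_dir);
}

module_init(example_debugfs_init);
module_exit(example_debugfs_exit);
MODULE_LICENSE("GPL");

With debugfs mounted at its conventional /sys/kernel/debug, the knobs this patch adds show up as e.g. /sys/kernel/debug/sched/latency_ns, replacing the former /proc/sys/kernel/sched_latency_ns sysctl.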
Diffstat (limited to 'kernel/sched')
-rw-r--r--    kernel/sched/core.c     4
-rw-r--r--    kernel/sched/debug.c   74
-rw-r--r--    kernel/sched/fair.c     9
-rw-r--r--    kernel/sched/sched.h    2
4 files changed, 77 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7d031da20df3..bac30db29d3f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5504,9 +5504,11 @@ static const struct file_operations sched_dynamic_fops = {
.release = single_release,
};
+extern struct dentry *debugfs_sched;
+
static __init int sched_init_debug_dynamic(void)
{
- debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
+ debugfs_create_file("sched_preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
return 0;
}
late_initcall(sched_init_debug_dynamic);
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4b49cc2af5c4..2093b9086e34 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -169,15 +169,81 @@ static const struct file_operations sched_feat_fops = {
.release = single_release,
};
+#ifdef CONFIG_SMP
+
+static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[16];
+
+ if (cnt > 15)
+ cnt = 15;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
+ return -EINVAL;
+
+ if (sched_update_scaling())
+ return -EINVAL;
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static int sched_scaling_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
+ return 0;
+}
+
+static int sched_scaling_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_scaling_show, NULL);
+}
+
+static const struct file_operations sched_scaling_fops = {
+ .open = sched_scaling_open,
+ .write = sched_scaling_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#endif /* SMP */
+
__read_mostly bool sched_debug_enabled;
+struct dentry *debugfs_sched;
+
static __init int sched_init_debug(void)
{
- debugfs_create_file("sched_features", 0644, NULL, NULL,
- &sched_feat_fops);
+ struct dentry __maybe_unused *numa;
- debugfs_create_bool("sched_debug", 0644, NULL,
- &sched_debug_enabled);
+ debugfs_sched = debugfs_create_dir("sched", NULL);
+
+ debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ debugfs_create_bool("debug_enabled", 0644, debugfs_sched, &sched_debug_enabled);
+
+ debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+ debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
+
+#ifdef CONFIG_SMP
+ debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
+ debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
+ debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
+#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+ numa = debugfs_create_dir("numa_balancing", debugfs_sched);
+
+ debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
+ debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
+ debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
+ debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
+#endif
return 0;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9b8ae02f1994..b3ea14c08a9b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -49,7 +49,7 @@ static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
*
* (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
*/
-enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
/*
* Minimal preemption granularity for CPU-bound tasks:
@@ -634,15 +634,10 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
-int sched_proc_update_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+int sched_update_scaling(void)
{
- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
unsigned int factor = get_update_sysctl_factor();
- if (ret || !write)
- return ret;
-
sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
sysctl_sched_min_granularity);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7e7e936b4938..123ff3bb90bc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1568,6 +1568,8 @@ static inline void unregister_sched_domain_sysctl(void)
}
#endif
+extern int sched_update_scaling(void);
+
extern void flush_smp_call_function_from_idle(void);
#else /* !CONFIG_SMP: */