From 00b89fe0197f0c55a045775c11553c0cdb7082fe Mon Sep 17 00:00:00 2001
From: Valentin Schneider
Date: Mon, 10 May 2021 16:10:23 +0100
Subject: sched: Make the idle task quack like a per-CPU kthread

For all intents and purposes, the idle task is a per-CPU kthread. It isn't
created via the same route as other pcpu kthreads however, and as a result it
is missing a few bells and whistles: it fails kthread_is_per_cpu() and it
doesn't have PF_NO_SETAFFINITY set.

Fix the former by giving the idle task a kthread struct along with the
KTHREAD_IS_PER_CPU flag. This requires some extra iffery as init_idle() can
be called more than once on the same idle task.

Signed-off-by: Valentin Schneider
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20210510151024.2448573-2-valentin.schneider@arm.com
---
 kernel/kthread.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

(limited to 'kernel/kthread.c')

diff --git a/kernel/kthread.c b/kernel/kthread.c
index fe3f2a40d61e..3d326833092b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -68,16 +68,6 @@ enum KTHREAD_BITS {
 	KTHREAD_SHOULD_PARK,
 };
 
-static inline void set_kthread_struct(void *kthread)
-{
-	/*
-	 * We abuse ->set_child_tid to avoid the new member and because it
-	 * can't be wrongly copied by copy_process(). We also rely on fact
-	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
-	 */
-	current->set_child_tid = (__force void __user *)kthread;
-}
-
 static inline struct kthread *to_kthread(struct task_struct *k)
 {
 	WARN_ON(!(k->flags & PF_KTHREAD));
@@ -103,6 +93,22 @@ static inline struct kthread *__to_kthread(struct task_struct *p)
 	return kthread;
 }
 
+void set_kthread_struct(struct task_struct *p)
+{
+	struct kthread *kthread;
+
+	if (__to_kthread(p))
+		return;
+
+	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
+	/*
+	 * We abuse ->set_child_tid to avoid the new member and because it
+	 * can't be wrongly copied by copy_process(). We also rely on fact
+	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
+	 */
+	p->set_child_tid = (__force void __user *)kthread;
+}
+
 void free_kthread_struct(struct task_struct *k)
 {
 	struct kthread *kthread;
@@ -272,8 +278,8 @@ static int kthread(void *_create)
 	struct kthread *self;
 	int ret;
 
-	self = kzalloc(sizeof(*self), GFP_KERNEL);
-	set_kthread_struct(self);
+	set_kthread_struct(current);
+	self = to_kthread(current);
 
 	/* If user was SIGKILLed, I release the structure. */
 	done = xchg(&create->done, NULL);
--
cgit v1.2.3
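
[Editorial illustration, not part of the patch above: the sched/core.c hunks of this commit are hidden by the "(limited to 'kernel/kthread.c')" view. The sketch below shows, based on the changelog only, roughly how the now non-static set_kthread_struct() is meant to be used when dressing up an idle task. The wrapper function name is hypothetical, and set_kthread_struct()'s declaration is assumed to be visible to the caller.]

/* Sketch only -- not a hunk from this patch. */
#include <linux/kthread.h>
#include <linux/sched.h>

void make_idle_quack_like_pcpu_kthread(struct task_struct *idle, int cpu)
{
	/* Idle tasks are kthreads whose affinity must never change. */
	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;

	/*
	 * Attach a struct kthread. Safe if this runs more than once on the
	 * same task: set_kthread_struct() returns early when a struct
	 * kthread is already attached (the "extra iffery" mentioned above).
	 */
	set_kthread_struct(idle);

	/* Record the CPU and set KTHREAD_IS_PER_CPU ... */
	kthread_set_per_cpu(idle, cpu);

	/* ... so the idle task now passes kthread_is_per_cpu(). */
	WARN_ON_ONCE(!kthread_is_per_cpu(idle));
}
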
From 2f064a59a11ff9bc22e52e9678bc601404c7cb34 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 11 Jun 2021 10:28:17 +0200
Subject: sched: Change task_struct::state

Change the type and name of task_struct::state. Drop the volatile and shrink
it to an 'unsigned int'. Rename it in order to find all uses such that we can
use READ_ONCE/WRITE_ONCE as appropriate.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Daniel Bristot de Oliveira
Acked-by: Will Deacon
Acked-by: Daniel Thompson
Link: https://lore.kernel.org/r/20210611082838.550736351@infradead.org
---
 kernel/kthread.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/kthread.c')

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3d326833092b..7bbfeeb0e956 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -457,7 +457,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
+static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
 {
 	unsigned long flags;
 
@@ -473,7 +473,7 @@ static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mas
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 }
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
 {
 	__kthread_bind_mask(p, cpumask_of(cpu), state);
 }
--
cgit v1.2.3
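
[Editorial illustration, not part of the patch above: the hunks shown only change the 'state' parameter type of the kthread bind helpers from long to unsigned int; the field itself lives in task_struct. In the mainline commit the field is renamed to task_struct::__state, and the point of the rename is that, with the volatile qualifier gone, every remaining access is converted to READ_ONCE()/WRITE_ONCE(). A minimal sketch of that access pattern follows; the example_* helper names are hypothetical.]

/* Sketch only -- not a hunk from this patch. */
#include <linux/sched.h>

/*
 * Old style (pre-commit): relied on 'volatile long state' and plain
 * accesses, e.g. "if (p->state == TASK_RUNNING)" and
 * "p->state = TASK_INTERRUPTIBLE;".
 */

static bool example_task_is_running(struct task_struct *p)
{
	/* Loads of the now-unsigned-int field are annotated explicitly. */
	return READ_ONCE(p->__state) == TASK_RUNNING;
}

static void example_mark_interruptible(struct task_struct *p)
{
	/* Stores likewise use WRITE_ONCE() instead of depending on volatile. */
	WRITE_ONCE(p->__state, TASK_INTERRUPTIBLE);
}
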