author | Peter Zijlstra <peterz@infradead.org> | 2016-09-20 22:00:38 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2016-09-30 11:03:28 +0200 |
commit | b2bf6c314e3a9e227925240d92ecd6e9b0110170 (patch) | |
tree | 4f412f1a68fb373542aea0fc62ba7de4577ad8b2 /kernel/sched | |
parent | a458ae2ea616420f74480f0f5ed67ca0f3b5dbf7 (diff) | |
sched/fair: Introduce set_curr_task() helper
Now that the ia64-only set_curr_task() symbol is gone, provide a
helper just like put_prev_task().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/core.c | 10 |
-rw-r--r-- | kernel/sched/sched.h | 5 |
2 files changed, 10 insertions, 5 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6ec0cfe56ddd..ce69fc7eaf19 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1112,7 +1112,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
 	if (running)
-		p->sched_class->set_curr_task(rq);
+		set_curr_task(rq, p);
 }
 
 /*
@@ -3710,7 +3710,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
 	if (running)
-		p->sched_class->set_curr_task(rq);
+		set_curr_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -4274,7 +4274,7 @@ change:
 			enqueue_task(rq, p, queue_flags);
 	}
 	if (running)
-		p->sched_class->set_curr_task(rq);
+		set_curr_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	preempt_disable(); /* avoid rq from going away on us */
@@ -5442,7 +5442,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
 	if (running)
-		p->sched_class->set_curr_task(rq);
+		set_curr_task(rq, p);
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -7952,7 +7952,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (queued)
 		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
 	if (unlikely(running))
-		tsk->sched_class->set_curr_task(rq);
+		set_curr_task(rq, tsk);
 
 	task_rq_unlock(rq, tsk, &rf);
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 01b5189235f2..fc6ae04ec080 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1274,6 +1274,11 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
+static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+{
+	curr->sched_class->set_curr_task(rq);
+}
+
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)
```
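For context, every call site touched here follows the same attribute-change pattern: a queued task is dequeued and a running task is taken off the CPU before its scheduling attributes change, and both steps are undone afterwards, finishing with set_curr_task() if the task was running. The sketch below is a simplified illustration of that pattern with the new helper, not the kernel's actual code; the dequeue half (task_on_rq_queued(), task_current(), dequeue_task()/DEQUEUE_SAVE, put_prev_task()) is not visible in the hunks above and is paraphrased here, and all locking is elided.

```c
/*
 * Illustrative sketch only (not kernel code): the attribute-change
 * pattern shared by the five call sites converted in this patch.
 */
static void change_attributes(struct rq *rq, struct task_struct *p)
{
	bool queued  = task_on_rq_queued(p);	/* is p on a runqueue? */
	bool running = task_current(rq, p);	/* is p currently on the CPU? */

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	/* ... modify p's scheduling attributes (prio, policy, cpumask, ...) ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
	if (running)
		set_curr_task(rq, p);	/* was: p->sched_class->set_curr_task(rq) */
}
```

The helper keeps the requeue side symmetric with put_prev_task(), so the sched_class method indirection lives in one place in sched.h instead of being open-coded at each of the five call sites.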