author     Shang XiaoJing <shangxiaojing@huawei.com>	2022-08-26 16:34:53 +0800
committer  Peter Zijlstra <peterz@infradead.org>	2022-09-01 11:19:54 +0200
commit     973bee493a1f75c6c0752a74fb9396cbc34f026e (patch)
tree       93ecd46ffe43e493a0667b26e4f4a3843da2763f /kernel/sched
parent     53aa930dc4bae6aa269951bd37103083145d6691 (diff)
sched/deadline: Add dl_task_is_earliest_deadline helper
Wrap the repeated code in the helper function dl_task_is_earliest_deadline(),
which returns true if there is no deadline task on the rq at all, or if the
task's deadline is earlier than the earliest deadline currently queued on the rq.
Signed-off-by: Shang XiaoJing <shangxiaojing@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Link: https://lore.kernel.org/r/20220826083453.698-1-shangxiaojing@huawei.com
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/deadline.c	24
1 file changed, 12 insertions, 12 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d116d2b9d2f9..34271aff4712 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1810,6 +1810,14 @@ static void yield_task_dl(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
+static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
+						struct rq *rq)
+{
+	return (!rq->dl.dl_nr_running ||
+		dl_time_before(p->dl.deadline,
+			       rq->dl.earliest_dl.curr));
+}
+
 static int find_later_rq(struct task_struct *task);
 
 static int
@@ -1852,9 +1860,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags)
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
-		    (dl_time_before(p->dl.deadline,
-				    cpu_rq(target)->dl.earliest_dl.curr) ||
-		     (cpu_rq(target)->dl.dl_nr_running == 0)))
+		    dl_task_is_earliest_deadline(p, cpu_rq(target)))
 			cpu = target;
 	}
 	rcu_read_unlock();
@@ -2221,9 +2227,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 
 		later_rq = cpu_rq(cpu);
 
-		if (later_rq->dl.dl_nr_running &&
-		    !dl_time_before(task->dl.deadline,
-				    later_rq->dl.earliest_dl.curr)) {
+		if (!dl_task_is_earliest_deadline(task, later_rq)) {
 			/*
 			 * Target rq has tasks of equal or earlier deadline,
 			 * retrying does not release any lock and is unlikely
@@ -2251,9 +2255,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		 * its earliest one has a later deadline than our
 		 * task, the rq is a good one.
 		 */
-		if (!later_rq->dl.dl_nr_running ||
-		    dl_time_before(task->dl.deadline,
-				    later_rq->dl.earliest_dl.curr))
+		if (dl_task_is_earliest_deadline(task, later_rq))
 			break;
 
 		/* Otherwise we try again. */
@@ -2424,9 +2426,7 @@ static void pull_dl_task(struct rq *this_rq)
 		 *  - it will preempt the last one we pulled (if any).
 		 */
 		if (p && dl_time_before(p->dl.deadline, dmin) &&
-		    (!this_rq->dl.dl_nr_running ||
-		     dl_time_before(p->dl.deadline,
-				    this_rq->dl.earliest_dl.curr))) {
+		    dl_task_is_earliest_deadline(p, this_rq)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));
 
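
For reference, below is a minimal user-space sketch of the predicate this patch
factors out. The dl_task_stub and dl_rq_stub structs and their field names are
simplified stand-ins invented here for illustration (they are not the kernel's
struct task_struct or struct dl_rq); only the wrap-safe comparison in
dl_time_before() mirrors the kernel helper of the same name.

	/* Minimal sketch of the factored-out check; compiles as plain C. */
	#include <stdbool.h>
	#include <stdio.h>

	typedef unsigned long long u64;

	struct dl_rq_stub {
		unsigned int dl_nr_running;	/* number of queued deadline tasks */
		u64 earliest_dl_curr;		/* earliest deadline currently on the rq */
	};

	struct dl_task_stub {
		u64 deadline;			/* absolute deadline of the task */
	};

	/* Mirrors the kernel's dl_time_before(): "a" expires strictly before "b",
	 * using a signed subtraction so the comparison is safe across wrap-around. */
	static inline bool dl_time_before(u64 a, u64 b)
	{
		return (long long)(a - b) < 0;
	}

	/* The consolidated condition: the task would be the earliest-deadline task
	 * on this rq, either because the rq has no deadline tasks at all or because
	 * the task's deadline beats the rq's current earliest one. */
	static inline bool dl_task_is_earliest_deadline(const struct dl_task_stub *p,
							const struct dl_rq_stub *rq)
	{
		return !rq->dl_nr_running ||
		       dl_time_before(p->deadline, rq->earliest_dl_curr);
	}

	int main(void)
	{
		struct dl_rq_stub empty_rq = { .dl_nr_running = 0, .earliest_dl_curr = 0 };
		struct dl_rq_stub busy_rq  = { .dl_nr_running = 2, .earliest_dl_curr = 1000 };
		struct dl_task_stub early  = { .deadline = 500 };
		struct dl_task_stub late   = { .deadline = 2000 };

		printf("%d\n", dl_task_is_earliest_deadline(&early, &empty_rq)); /* 1: rq empty */
		printf("%d\n", dl_task_is_earliest_deadline(&early, &busy_rq));  /* 1: 500 < 1000 */
		printf("%d\n", dl_task_is_earliest_deadline(&late,  &busy_rq));  /* 0: 2000 > 1000 */
		return 0;
	}

Keeping the check in one inline helper ensures the "rq is empty or this task
beats the rq's earliest deadline" condition stays identical at all four call
sites touched by the patch: select_task_rq_dl(), the two tests in
find_lock_later_rq(), and pull_dl_task().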