author     Paul Turner <pjt@google.com>       2011-07-21 09:43:37 -0700
committer  Ingo Molnar <mingo@elte.hu>        2011-08-14 12:03:42 +0200
commit     5238cdd3873e67a98b28c1161d65d2a615c320a3
tree       6c0e162a8ef9e0eb4d0099b537cfde5138aeddb4
parent     64660c864f46202b932b911a69deb09805bdbaf8
sched: Prevent buddy interactions with throttled entities
Buddies allow us to select "on-rq" entities without actually selecting them
from a cfs_rq's rb_tree. As a result we must ensure that throttled entities
are not falsely nominated as buddies. The fact that entities are dequeued
within throttle_cfs_rq() is not sufficient for clearing buddy status, as the
nomination may occur after throttling.
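
To make the failure mode concrete, here is a toy model of buddy-driven
picking; this is a self-contained sketch, not the kernel code, and the
struct layout and names below are illustrative only:

    #include <stdio.h>

    /* Toy model (illustrative, not kernel code): a cfs_rq whose next-buddy
     * hint can override the leftmost entity of the rb_tree. */
    struct entity { const char *name; int throttled_hierarchy; };
    struct cfs_rq { struct entity *leftmost; struct entity *next; };

    static struct entity *pick_next_entity(struct cfs_rq *cfs_rq)
    {
            /* Without the throttled check, a buddy nominated before being
             * throttled would still be handed to the scheduler even though
             * it is no longer runnable. */
            if (cfs_rq->next && !cfs_rq->next->throttled_hierarchy)
                    return cfs_rq->next;
            return cfs_rq->leftmost;
    }

    int main(void)
    {
            struct entity a = { "leftmost", 0 };
            struct entity b = { "buddy", 1 };  /* throttled after nomination */
            struct cfs_rq rq = { &a, &b };

            printf("picked: %s\n", pick_next_entity(&rq)->name); /* "leftmost" */
            return 0;
    }

The patch below takes the complementary approach: it never lets a throttled
entity become a buddy in the first place, which also saves the preemption
work.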
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.886850167@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/sched_fair.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5a2089492a98..1d4acbea9e60 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2348,6 +2348,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	if (unlikely(se == pse))
 		return;
 
+	/*
+	 * This is possible from callers such as pull_task(), in which we
+	 * unconditionally check_preempt_curr() after an enqueue (which may have
+	 * led to a throttle).  This both saves work and prevents false
+	 * next-buddy nomination below.
+	 */
+	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
+		return;
+
 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
 		set_next_buddy(pse);
 		next_buddy_marked = 1;
@@ -2356,6 +2365,12 @@
 	/*
 	 * We can come here with TIF_NEED_RESCHED already set from new task
 	 * wake up path.
+	 *
+	 * Note: this also catches the edge-case of curr being in a throttled
+	 * group (e.g. via set_curr_task), since update_curr() (in the
+	 * enqueue of curr) will have resulted in resched being set.  This
+	 * prevents us from potentially nominating it as a false LAST_BUDDY
+	 * below.
 	 */
 	if (test_tsk_need_resched(curr))
 		return;
@@ -2474,7 +2489,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
 {
 	struct sched_entity *se = &p->se;
 
-	if (!se->on_rq)
+	/* throttled hierarchies are not runnable */
+	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
 		return false;
 
 	/* Tell the scheduler that we'd really like pse to run next. */
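
For reference, both new checks lean on throttled_hierarchy(), which was
introduced by the parent commit in this series (64660c864f46); the following
is its assumed shape from that commit, reproduced here as context rather
than as part of this patch:

    /* Assumed shape, per the parent commit: a cfs_rq is unrunnable if it
     * is throttled itself or sits beneath a throttled ancestor (tracked
     * via throttle_count). */
    static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
    {
            return cfs_rq->throttled;
    }

    static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
    {
            return cfs_rq_throttled(cfs_rq) || cfs_rq->throttle_count;
    }

Because the test amounts to a pair of flag reads, performing it
unconditionally at the top of check_preempt_wakeup() is cheaper than walking
into the buddy nomination and preemption logic for an entity that cannot run.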