author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-11 09:30:22 +0200
committer	Ingo Molnar <mingo@elte.hu>			2008-08-11 09:30:22 +0200
commit		1b12bbc747560ea68bcc132c3d05699e52271da0 (patch)
tree		0e0fe5b7fe07d411251eebdd053e9e7793820248 /kernel/sched_rt.c
parent		64aa348edc617dea17bbd01ddee4e47886d5ec8c (diff)
lockdep: re-annotate scheduler runqueues
Instead of using a per-rq lock class, use the regular nesting operations.

However, take extra care with double_lock_balance() as it can release the
already held rq->lock (and therefore change its nesting class).

So what can happen is:

	spin_lock(rq->lock);		// this rq, subclass 0

	double_lock_balance(rq, other_rq);
		// release rq
		// acquire other_rq->lock, subclass 0
		// acquire rq->lock, subclass 1

	spin_unlock(other_rq->lock);

leaving you with rq->lock in subclass 1.

So a subsequent double_lock_balance() call can try to nest a subclass 1
lock while already holding a subclass 1 lock.

Fix this by introducing double_unlock_balance(), which releases the other
rq's lock, but also re-sets the subclass for this rq's lock to 0.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
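The helper itself is added to kernel/sched.c and so falls outside this
diffstat; a minimal sketch of its shape, assuming the lock_set_subclass()
annotation introduced by the parent commit:

static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	/* Drop the second runqueue's lock... */
	spin_unlock(&busiest->lock);
	/* ...and reset the still-held this_rq->lock back to subclass 0,
	 * undoing the subclass-1 annotation that double_lock_balance()
	 * may have left behind. */
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}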
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f9dad0..6163e4cf885b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 		}
 
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;
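With these changes, every unlock of a runqueue taken via
double_lock_balance() goes through double_unlock_balance(). An
illustrative caller pattern (balance_example() is hypothetical, not code
from this patch) showing why the pairing matters:

static void balance_example(struct rq *this_rq, struct rq *other_rq)
{
	spin_lock(&this_rq->lock);		/* this_rq->lock, subclass 0 */

	/* May release and re-acquire this_rq->lock at subclass 1
	 * while taking other_rq->lock. */
	double_lock_balance(this_rq, other_rq);

	/* ... move tasks between the two runqueues ... */

	/* Releases other_rq->lock and resets this_rq->lock to
	 * subclass 0, so a later double_lock_balance() cannot
	 * end up nesting subclass 1 inside subclass 1. */
	double_unlock_balance(this_rq, other_rq);

	spin_unlock(&this_rq->lock);
}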