author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-01-25 21:08:29 +0100
committer  Ingo Molnar <mingo@elte.hu>              2008-01-25 21:08:29 +0100
commit     fa85ae2418e6843953107cd6a06f645752829bc0 (patch)
tree       004130ac471247a29d3f6adfbfe61c474e725779 /kernel/sched_rt.c
parent     8f4d37ec073c17e2d4aa8851df5837d798606d6f (diff)
sched: rt time limit
Add a very simple time limit on the realtime scheduling classes: allow the
rq's realtime class to consume sched_rt_ratio of every sched_rt_period slice.
If the class exceeds this quota, the fair class will preempt the realtime
class.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
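As a back-of-the-envelope illustration of the quota arithmetic, the sketch
below (plain userspace C, not part of the patch) computes the rt quota the
same way sched_rt_ratio_exceeded() does. The default values used here,
sched_rt_period = 1000 ms and sched_rt_ratio = 62259 (roughly 95% in 16-bit
fixed point, with SCHED_RT_FRAC = 1 << 16), are assumptions about the
surrounding series rather than anything stated in this diff:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC		1000000ULL
#define SCHED_RT_FRAC_SHIFT	16	/* ratio is a 16-bit fixed-point fraction */

int main(void)
{
	/* Assumed defaults; the real values are scheduler sysctls. */
	uint64_t sysctl_sched_rt_period = 1000;		/* ms */
	uint64_t sysctl_sched_rt_ratio  = 62259;	/* ~0.95 * 2^16 */

	/* Same computation as in the patch below. */
	uint64_t period = sysctl_sched_rt_period * NSEC_PER_MSEC;
	uint64_t quota  = (period * sysctl_sched_rt_ratio) >> SCHED_RT_FRAC_SHIFT;

	/* Prints ~950000000 ns: rt tasks may consume ~950ms of each 1s period. */
	printf("rt quota: %llu ns of every %llu ns\n",
	       (unsigned long long)quota, (unsigned long long)period);
	return 0;
}

Once the rt_time accumulated by update_curr_rt() crosses this quota, the rq's
rt class is throttled and pick_next_task_rt() returns NULL, letting the fair
class run until update_sched_rt_period() clears the throttle.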
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 83fbbcb8019e..fd10d965aa06 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -45,6 +45,50 @@ static void update_rt_migration(struct rq *rq)
}
#endif /* CONFIG_SMP */

+static int sched_rt_ratio_exceeded(struct rq *rq, struct rt_rq *rt_rq)
+{
+ u64 period, ratio;
+
+ if (sysctl_sched_rt_ratio == SCHED_RT_FRAC)
+ return 0;
+
+ if (rt_rq->rt_throttled)
+ return 1;
+
+ period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+ ratio = (period * sysctl_sched_rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+
+ if (rt_rq->rt_time > ratio) {
+ rt_rq->rt_throttled = rq->clock + period - rt_rq->rt_time;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void update_sched_rt_period(struct rq *rq)
+{
+ while (rq->clock > rq->rt_period_expire) {
+ u64 period, ratio;
+
+ period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+ ratio = (period * sysctl_sched_rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+
+ rq->rt.rt_time -= min(rq->rt.rt_time, ratio);
+ rq->rt_period_expire += period;
+ }
+
+ /*
+ * When the rt throttle is expired, let them rip.
+ * (XXX: use hrtick when available)
+ */
+ if (rq->rt.rt_throttled && rq->clock > rq->rt.rt_throttled) {
+ rq->rt.rt_throttled = 0;
+ if (!sched_rt_ratio_exceeded(rq, &rq->rt))
+ resched_task(rq->curr);
+ }
+}
+
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -66,6 +110,11 @@ static void update_curr_rt(struct rq *rq)
curr->se.sum_exec_runtime += delta_exec;
curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec);
+
+ rq->rt.rt_time += delta_exec;
+ update_sched_rt_period(rq);
+ if (sched_rt_ratio_exceeded(rq, &rq->rt))
+ resched_task(curr);
}

static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
@@ -208,8 +257,12 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
struct rt_prio_array *array = &rq->rt.active;
struct task_struct *next;
struct list_head *queue;
+ struct rt_rq *rt_rq = &rq->rt;
int idx;

+ if (sched_rt_ratio_exceeded(rq, rt_rq))
+ return NULL;
+
idx = sched_find_first_bit(array->bitmap);
if (idx >= MAX_RT_PRIO)
return NULL;