 include/linux/sched.h |  1 -
 kernel/sched.c        |  4 +---
 kernel/sched_fair.c   | 10 ++--------
 3 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 951759e30c09..93fd30d6dac4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -863,7 +863,6 @@ struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
 	unsigned int		on_rq;
-	int			peer_preempt;
 
 	u64			exec_start;
 	u64			sum_exec_runtime;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4b23dfb4c80f..2a107e4ad5ed 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -460,7 +460,6 @@ enum {
 	SCHED_FEAT_TREE_AVG		= 4,
 	SCHED_FEAT_APPROX_AVG		= 8,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
-	SCHED_FEAT_PREEMPT_RESTRICT	= 32,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -468,8 +467,7 @@ const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_START_DEBIT		* 1 |
 		SCHED_FEAT_TREE_AVG		* 0 |
 		SCHED_FEAT_APPROX_AVG		* 0 |
-		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
-		SCHED_FEAT_PREEMPT_RESTRICT	* 0;
+		SCHED_FEAT_WAKEUP_PREEMPT	* 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7264814ba62a..fbcb426029d0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -546,7 +546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -574,10 +573,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime ||
-			(sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+	if (delta_exec > ideal_runtime)
 		resched_task(rq_of(cfs_rq)->curr);
-	curr->peer_preempt = 0;
 }
 
 static void
@@ -867,9 +864,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		gran = calc_delta_fair(gran, &se->load);
 
 		if (delta > gran) {
-			int now = !sched_feat(PREEMPT_RESTRICT);
-
-			if (now || p->prio < curr->prio || !se->peer_preempt++)
+			if (p->prio < curr->prio)
 				resched_task(curr);
 		}
 	}
@@ -1083,7 +1078,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	se->peer_preempt = 0;
 	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }
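For reference, a minimal sketch of how the tick-time preemption check reads once the hunks above are applied, reassembled from the post-image of the diff; the local variable declarations are assumed, since they fall outside the hunk context:

/*
 * Sketch of check_preempt_tick() after this patch, reassembled from the
 * post-image of the hunk above (variable declarations assumed). With
 * PREEMPT_RESTRICT gone, the tick-time decision reduces to: reschedule
 * the current entity once it has run longer than its ideal slice.
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}

On the wakeup path the removal leaves only the priority test: once the waking entity leads the current one by more than the wakeup granularity, it calls resched_task(curr) only when p->prio < curr->prio, and there is no peer_preempt bookkeeping left to reset in dequeue_entity(), check_preempt_tick() or task_new_fair().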