author		Zhihui Zhang <zzhsuny@gmail.com>	2014-09-20 21:24:36 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-09-21 09:00:02 +0200
commit		9c58c79a8a76c510cd3a5012c536d4fe3c81ec3b (patch)
tree		f1ab38bfa70d4f9c2a7ef1008c5de9c7d5729d8f /kernel
parent		bd61c98f9b3f142cd63f9e15acfe203bec9e5f5a (diff)
download	linux-stable-9c58c79a8a76c510cd3a5012c536d4fe3c81ec3b.tar.gz
		linux-stable-9c58c79a8a76c510cd3a5012c536d4fe3c81ec3b.tar.bz2
		linux-stable-9c58c79a8a76c510cd3a5012c536d4fe3c81ec3b.zip
sched: Clean up some typos and grammatical errors in code/comments
Signed-off-by: Zhihui Zhang <zzhsuny@gmail.com>
Cc: peterz@infradead.org
Link: http://lkml.kernel.org/r/1411262676-19928-1-git-send-email-zzhsuny@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	| 4
-rw-r--r--	kernel/sched/fair.c	| 6
-rw-r--r--	kernel/sched/sched.h	| 2
3 files changed, 6 insertions, 6 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 61ee2b327a27..a2841904f2d5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8069,7 +8069,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
 
 		quota = normalize_cfs_quota(tg, d);
-		parent_quota = parent_b->hierarchal_quota;
+		parent_quota = parent_b->hierarchical_quota;
 
 		/*
 		 * ensure max(child_quota) <= parent_quota, inherit when no
@@ -8080,7 +8080,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
 			return -EINVAL;
 	}
-	cfs_b->hierarchal_quota = quota;
+	cfs_b->hierarchical_quota = quota;
 
 	return 0;
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 74fa2c210b6d..2a1e6ac6bb32 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2224,8 +2224,8 @@ static __always_inline u64 decay_load(u64 val, u64 n)
 
 	/*
 	 * As y^PERIOD = 1/2, we can combine
-	 *    y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
-	 * With a look-up table which covers k^n (n<PERIOD)
+	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+	 * With a look-up table which covers y^n (n<PERIOD)
 	 *
 	 * To achieve constant time decay_load.
 	 */
@@ -6410,7 +6410,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto force_balance;
 
 	/*
-	 * If the local group is more busy than the selected busiest group
+	 * If the local group is busier than the selected busiest group
 	 * don't try and pull any tasks.
 	 */
 	if (local->avg_load >= busiest->avg_load)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index aa0f73ba3777..1bc6aad1391a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -188,7 +188,7 @@ struct cfs_bandwidth {
 	raw_spinlock_t lock;
 	ktime_t period;
 	u64 quota, runtime;
-	s64 hierarchal_quota;
+	s64 hierarchical_quota;
 	u64 runtime_expires;
 
 	int idle, timer_active;
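For context on the comment being corrected in decay_load(): with y chosen so that y^PERIOD = 1/2, any power factors as y^n = 1/2^(n/PERIOD) * y^(n%PERIOD), so one halving per elapsed PERIOD plus a PERIOD-entry look-up table gives constant-time decay. The following is a minimal floating-point sketch of that identity only, not the kernel's implementation (decay_load() uses scaled fixed-point arithmetic); PERIOD and the variable names below are illustrative stand-ins for LOAD_AVG_PERIOD and the in-kernel table.

/*
 * Illustrative sketch only: check the identity described in the fixed
 * comment.  With y defined by y^PERIOD == 1/2,
 *
 *	y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
 *
 * so a look-up table covering y^n for n < PERIOD is enough.
 */
#include <math.h>
#include <stdio.h>

#define PERIOD 32	/* stands in for LOAD_AVG_PERIOD */

int main(void)
{
	double y = pow(0.5, 1.0 / PERIOD);	/* y^PERIOD == 1/2 */
	double table[PERIOD];			/* covers y^n for n < PERIOD */
	unsigned int n = 100;

	for (unsigned int i = 0; i < PERIOD; i++)
		table[i] = pow(y, i);

	/* direct exponentiation vs. the split form the comment describes */
	double direct = pow(y, n);
	double split  = pow(0.5, n / PERIOD) * table[n % PERIOD];

	printf("y^%u: direct=%.10f split=%.10f\n", n, direct, split);
	return 0;
}

Compiled with a C compiler and -lm, both values agree, which is why the comment's "k" was simply a typo for "y".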