author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-02-18 02:26:55 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-03-09 14:41:08 +0100
commit     a33cce1c6cc3268d8b4843bf1e4ac1e70b27d107
tree       efeb3572a7cf740e4b521ac2d68218c6b7ae79ef /drivers/cpufreq/cpufreq_ondemand.c
parent     76c5f66aa10720a377dfe8beebd39a0b2a938965
cpufreq: governor: Fix CPU load information updates via ->store
The ->store() callbacks of some tunable sysfs attributes of the
ondemand and conservative governors trigger immediate updates of
the CPU load information for all CPUs "governed" by the given
dbs_data. They do that by walking the cpu_dbs_info structures of
all online CPUs in the system and updating them.
This is questionable for two reasons. First, it may lead to a lot of
extra overhead on a system with many CPUs if the given dbs_data is
only associated with a few of them. Second, if governor tunables are
per-policy, the CPUs associated with the other sets of governor
tunables should not be updated.
To address this issue, use the observation that in all of the places
in question the update operation may be carried out in the same way,
because all of the tunables involved are now located in struct
dbs_data and readily available to the common code. Make the code in
those places invoke the same (new) helper function that carries out
the update correctly.
That new function always checks the ignore_nice_load tunable value
and, if it is set, updates the CPUs' prev_cpu_nice data fields as
well. The original code in store_io_is_busy() did not do that, but
it should have.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 22 ++++------------------
1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 393fcf13a2b6..216ea442b835 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -29,6 +29,7 @@
 
 static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
 
+static struct dbs_governor od_dbs_gov;
 static struct od_ops od_ops;
 
 static unsigned int default_powersave_bias;
@@ -222,7 +223,6 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
 {
 	unsigned int input;
 	int ret;
-	unsigned int j;
 
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
@@ -230,12 +230,8 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
 	dbs_data->io_is_busy = !!input;
 
 	/* we need to re-evaluate prev_cpu_idle */
-	for_each_online_cpu(j) {
-		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
-				j);
-		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
-			&dbs_info->cdbs.prev_cpu_wall, dbs_data->io_is_busy);
-	}
+	gov_update_cpu_data(&od_dbs_gov, dbs_data);
+
 	return count;
 }
 
@@ -288,8 +284,6 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 	unsigned int input;
 	int ret;
 
-	unsigned int j;
-
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -303,16 +297,8 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 	dbs_data->ignore_nice_load = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
-	for_each_online_cpu(j) {
-		struct od_cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(od_cpu_dbs_info, j);
-		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
-			&dbs_info->cdbs.prev_cpu_wall, dbs_data->io_is_busy);
-		if (dbs_data->ignore_nice_load)
-			dbs_info->cdbs.prev_cpu_nice =
-				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+	gov_update_cpu_data(&od_dbs_gov, dbs_data);
 
-	}
 	return count;
 }
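
The gov_update_cpu_data() helper called in both hunks above is added to the
common governor code (drivers/cpufreq/cpufreq_governor.c) and is therefore
not part of this ondemand diff. The following is only a minimal sketch of
what such a helper could look like, assuming dbs_data keeps a
policy_dbs_list of the policy_dbs_info structures attached to it and that
the governor exposes its per-CPU data through a get_cpu_cdbs() callback;
those names are illustrative assumptions, not text taken from this patch.

/*
 * Hedged sketch only -- not the upstream implementation.  The list and
 * callback names (policy_dbs_list, policy->cpus, gov->get_cpu_cdbs) are
 * assumptions about the common governor code of this era.
 */
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/list.h>

#include "cpufreq_governor.h"

void gov_update_cpu_data(struct dbs_governor *gov, struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	/* Visit only the policies actually attached to this dbs_data ... */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		unsigned int j;

		/* ... and only the CPUs covered by each of those policies. */
		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
					&j_cdbs->prev_cpu_wall,
					dbs_data->io_is_busy);
			/*
			 * Unlike the old store_io_is_busy() loop, the helper
			 * also refreshes prev_cpu_nice whenever
			 * ignore_nice_load is set.
			 */
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}

With both tunables stored directly in struct dbs_data, store_io_is_busy()
and store_ignore_nice_load() in the ondemand governor reduce to the single
gov_update_cpu_data(&od_dbs_gov, dbs_data) call visible in the hunks above,
and the conservative governor can invoke the same helper from its own
->store() callbacks.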