author     Steven Finney <Steven.Finney@palm.com>    2011-05-02 11:29:17 -0700
committer  Dave Jones <davej@redhat.com>             2011-05-04 11:50:57 -0400
commit     98586ed8b8878e10691203687e89a42fa3355300 (patch)
tree       728275611557268583bac0232c90b928151e3c66 /drivers/cpufreq
parent     335dc3335ff692173bc766b248f7a97f3a23b30a (diff)
[CPUFREQ] Fix memory leak in cpufreq_stat
When a CPU is taken offline in an SMP system, cpufreq_remove_dev() nulls out the per-cpu policy before cpufreq_stats_free_table() can make use of it. cpufreq_stats_free_table() then skips the call to sysfs_remove_group(), leaving about 100 bytes of sysfs-related memory unclaimed each time a CPU removal occurs.

Break up cpufreq_stats_free_table() into sysfs and table portions, and call the sysfs portion early, from the CPU_DOWN_PREPARE notification.

Signed-off-by: Steven Finney <steven.finney@palm.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Cc: stable@kernel.org
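For background on why the .priority = 1 in the patch below is enough: notifier chains invoke callbacks in descending priority order, and the cpufreq core registers its hotplug callback at the default priority (0), so a priority-1 callback sees CPU_DOWN_PREPARE while the policy is still valid. The following is a minimal stand-alone sketch of that ordering in plain user-space C, not kernel code; the struct, helper, and handler names here are made up for illustration.

/*
 * Sketch of priority-ordered notifier dispatch: "stats" stands in for
 * cpufreq_stats_free_sysfs(), "core" for the cpufreq core handler that
 * invalidates the per-cpu policy.
 */
#include <stdio.h>

struct notifier {
        int (*call)(unsigned int cpu);
        int priority;
        struct notifier *next;
};

static struct notifier *chain;

/* Insert while keeping the chain sorted by descending priority. */
static void notifier_register(struct notifier *nb)
{
        struct notifier **pos = &chain;

        while (*pos && (*pos)->priority >= nb->priority)
                pos = &(*pos)->next;
        nb->next = *pos;
        *pos = nb;
}

/* Walk the chain head to tail, i.e. highest priority first. */
static void notifier_call_chain(unsigned int cpu)
{
        struct notifier *nb;

        for (nb = chain; nb; nb = nb->next)
                nb->call(cpu);
}

/* Models cpufreq_stats_free_sysfs(): the policy is still valid here. */
static int stats_down_prepare(unsigned int cpu)
{
        printf("cpufreq_stats: remove sysfs group for cpu%u (policy still valid)\n", cpu);
        return 0;
}

/* Models the cpufreq core handler that tears down the per-cpu policy. */
static int core_down_prepare(unsigned int cpu)
{
        printf("cpufreq core: cpu%u policy gone\n", cpu);
        return 0;
}

int main(void)
{
        struct notifier core  = { .call = core_down_prepare,  .priority = 0 };
        struct notifier stats = { .call = stats_down_prepare, .priority = 1 };

        notifier_register(&core);
        notifier_register(&stats);

        /* Simulate CPU_DOWN_PREPARE for cpu2: the stats handler prints first. */
        notifier_call_chain(2);
        return 0;
}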
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c  21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 00d73fc8e4e2..4f1b8de2c9f3 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 	return -1;
 }
 
+/* should be called late in the CPU removal sequence so that the stats
+ * memory is still available in case someone tries to use it.
+ */
 static void cpufreq_stats_free_table(unsigned int cpu)
 {
 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	if (policy && policy->cpu == cpu)
-		sysfs_remove_group(&policy->kobj, &stats_attr_group);
 	if (stat) {
 		kfree(stat->time_in_state);
 		kfree(stat);
 	}
 	per_cpu(cpufreq_stats_table, cpu) = NULL;
+}
+
+/* must be called early in the CPU removal sequence (before
+ * cpufreq_remove_dev) so that policy is still valid.
+ */
+static void cpufreq_stats_free_sysfs(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	if (policy && policy->cpu == cpu)
+		sysfs_remove_group(&policy->kobj, &stats_attr_group);
 	if (policy)
 		cpufreq_cpu_put(policy);
 }
@@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE_FROZEN:
 		cpufreq_update_policy(cpu);
 		break;
+	case CPU_DOWN_PREPARE:
+		cpufreq_stats_free_sysfs(cpu);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		cpufreq_stats_free_table(cpu);
@@ -324,9 +337,11 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
+/* priority=1 so this will get called before cpufreq_remove_dev */
 static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
 {
 	.notifier_call = cpufreq_stat_cpu_callback,
+	.priority = 1,
 };
 
 static struct notifier_block notifier_policy_block = {