Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        |  16
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c         |   6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c         |  23
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  | 157
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c       |   7
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c             |   5
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c               |  20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c           |   4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c          |   1
-rw-r--r--  arch/x86/kernel/cpu/proc.c                        |   2
10 files changed, 148 insertions, 93 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa..ff2fff56f0a8 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
static void drv_write(struct drv_cmd *cmd)
{
cpumask_t saved_mask = current->cpus_allowed;
+ cpumask_of_cpu_ptr_declare(cpu_mask);
unsigned int i;
- for_each_cpu_mask(i, cmd->mask) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+ for_each_cpu_mask_nr(i, cmd->mask) {
+ cpumask_of_cpu_ptr_next(cpu_mask, i);
+ set_cpus_allowed_ptr(current, cpu_mask);
do_drv_write(cmd);
}
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
} aperf_cur, mperf_cur;
cpumask_t saved_mask;
+ cpumask_of_cpu_ptr(cpu_mask, cpu);
unsigned int perf_percent;
unsigned int retval;
saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpu_mask);
if (get_cpu() != cpu) {
/* We were not able to run on requested processor */
put_cpu();
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
+ cpumask_of_cpu_ptr(cpu_mask, cpu);
struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
unsigned int freq;
unsigned int cached_freq;
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
}
cached_freq = data->freq_table[data->acpi_data->state].frequency;
- freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+ freq = extract_freq(get_cur_val(cpu_mask), data);
if (freq != cached_freq) {
/*
* The dreaded BIOS frequency change behind our back.
@@ -451,7 +455,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = perf->states[perf->state].core_frequency * 1000;
freqs.new = data->freq_table[next_state].frequency;
- for_each_cpu_mask(i, cmd.mask) {
+ for_each_cpu_mask_nr(i, cmd.mask) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
@@ -466,7 +470,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
}
}
- for_each_cpu_mask(i, cmd.mask) {
+ for_each_cpu_mask_nr(i, cmd.mask) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
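The pattern repeated throughout this file (and in the other cpufreq drivers below) replaces &cpumask_of_cpu(i), which can build a full cpumask_t temporary on the stack, with the cpumask_of_cpu_ptr helpers that hand back a pointer, and switches the iterators to the *_nr variants. A condensed sketch of the drv_write()-style loop, with the per-CPU register work elided; write_on_mask_cpus() is an illustrative name, not a function in this patch, and the helper macros are assumed to behave as the hunk above implies (declare a pointer, then aim it at a given CPU's mask):

#include <linux/cpumask.h>
#include <linux/sched.h>

static void write_on_mask_cpus(const cpumask_t *mask)
{
	cpumask_t saved_mask = current->cpus_allowed;
	cpumask_of_cpu_ptr_declare(cpu_mask);	/* helper from this series; avoids &cpumask_of_cpu(i) temporaries */
	unsigned int i;

	/* the _nr variant stops scanning at nr_cpu_ids instead of NR_CPUS */
	for_each_cpu_mask_nr(i, *mask) {
		cpumask_of_cpu_ptr_next(cpu_mask, i);	/* point at CPU i's mask */
		set_cpus_allowed_ptr(current, cpu_mask);/* migrate onto CPU i */
		/* ... per-CPU MSR/port write goes here ... */
	}
	set_cpus_allowed_ptr(current, &saved_mask);	/* restore original affinity */
}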
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc..f1685fb91fbd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
return 0;
/* notifiers */
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu_mask_nr(i, policy->cpus) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
- for_each_cpu_mask(i, policy->cpus)
+ for_each_cpu_mask_nr(i, policy->cpus)
cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
/* notifiers */
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu_mask_nr(i, policy->cpus) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
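p4-clockmod only needs the iterator change: for_each_cpu_mask_nr() bounds its scan by nr_cpu_ids (the number of CPU ids the booted system can actually have) rather than the compile-time NR_CPUS, so the three notification loops stop walking a possibly huge tail of empty bits. A minimal sketch of such a notifier loop, factored into a helper purely for illustration (notify_policy_cpus() is not a function added by the patch):

#include <linux/cpufreq.h>
#include <linux/cpumask.h>

static void notify_policy_cpus(struct cpufreq_policy *policy,
			       struct cpufreq_freqs *freqs, unsigned int state)
{
	unsigned int i;

	/* scans bits 0..nr_cpu_ids-1 rather than 0..NR_CPUS-1 */
	for_each_cpu_mask_nr(i, policy->cpus) {
		freqs->cpu = i;
		cpufreq_notify_transition(freqs, state);
	}
}

It would be called with CPUFREQ_PRECHANGE before cpufreq_p4_setdc() and CPUFREQ_POSTCHANGE afterwards, matching the two hunks above.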
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3..53c7b6936973 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
static int check_supported_cpu(unsigned int cpu)
{
cpumask_t oldmask;
+ cpumask_of_cpu_ptr(cpu_mask, cpu);
u32 eax, ebx, ecx, edx;
unsigned int rc = 0;
oldmask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpu_mask);
if (smp_processor_id() != cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -966,7 +967,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- for_each_cpu_mask(i, *(data->available_cores)) {
+ for_each_cpu_mask_nr(i, *(data->available_cores)) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
@@ -974,7 +975,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
res = transition_fid_vid(data, fid, vid);
freqs.new = find_khz_freq_from_fid(data->currfid);
- for_each_cpu_mask(i, *(data->available_cores)) {
+ for_each_cpu_mask_nr(i, *(data->available_cores)) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
@@ -997,7 +998,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
- for_each_cpu_mask(i, *(data->available_cores)) {
+ for_each_cpu_mask_nr(i, *(data->available_cores)) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
@@ -1005,7 +1006,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
res = transition_pstate(data, pstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
- for_each_cpu_mask(i, *(data->available_cores)) {
+ for_each_cpu_mask_nr(i, *(data->available_cores)) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
{
cpumask_t oldmask;
+ cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+ set_cpus_allowed_ptr(current, cpu_mask);
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
cpumask_t oldmask;
+ cpumask_of_cpu_ptr_declare(newmask);
int rc;
if (!cpu_online(pol->cpu))
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+ cpumask_of_cpu_ptr_next(newmask, pol->cpu);
+ set_cpus_allowed_ptr(current, newmask);
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
set_cpus_allowed_ptr(current, &oldmask);
if (cpu_family == CPU_HW_PSTATE)
- pol->cpus = cpumask_of_cpu(pol->cpu);
+ pol->cpus = *newmask;
else
pol->cpus = per_cpu(cpu_core_map, pol->cpu);
data->available_cores = &(pol->cpus);
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
{
struct powernow_k8_data *data;
cpumask_t oldmask = current->cpus_allowed;
+ cpumask_of_cpu_ptr(newmask, cpu);
unsigned int khz = 0;
unsigned int first;
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
if (!data)
return -EINVAL;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, newmask);
if (smp_processor_id() != cpu) {
printk(KERN_ERR PFX
"limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67e..ca2ac13b7af2 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -28,7 +28,8 @@
#define PFX "speedstep-centrino: "
#define MAINTAINER "cpufreq@lists.linux.org.uk"
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
+#define dprintk(msg...) \
+ cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
#define INTEL_MSR_RANGE (0xffff)
@@ -66,11 +67,12 @@ struct cpu_model
struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
};
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x);
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+ const struct cpu_id *x);
/* Operating points for current CPU */
-static struct cpu_model *centrino_model[NR_CPUS];
-static const struct cpu_id *centrino_cpu[NR_CPUS];
+static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
+static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
static struct cpufreq_driver centrino_driver;
@@ -255,7 +257,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
return -ENOENT;
}
- centrino_model[policy->cpu] = model;
+ per_cpu(centrino_model, policy->cpu) = model;
dprintk("found \"%s\": max frequency: %dkHz\n",
model->model_name, model->max_freq);
@@ -264,10 +266,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
}
#else
-static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x)
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+ const struct cpu_id *x)
{
if ((c->x86 == x->x86) &&
(c->x86_model == x->x86_model) &&
@@ -286,23 +292,28 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
* for centrino, as some DSDTs are buggy.
* Ideally, this can be done using the acpi_data structure.
*/
- if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) ||
- (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) ||
- (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) {
+ if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
+ (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
+ (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
msr = (msr >> 8) & 0xff;
return msr * 100000;
}
- if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points))
+ if ((!per_cpu(centrino_model, cpu)) ||
+ (!per_cpu(centrino_model, cpu)->op_points))
return 0;
msr &= 0xffff;
- for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) {
- if (msr == centrino_model[cpu]->op_points[i].index)
- return centrino_model[cpu]->op_points[i].frequency;
+ for (i = 0;
+ per_cpu(centrino_model, cpu)->op_points[i].frequency
+ != CPUFREQ_TABLE_END;
+ i++) {
+ if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
+ return per_cpu(centrino_model, cpu)->
+ op_points[i].frequency;
}
if (failsafe)
- return centrino_model[cpu]->op_points[i-1].frequency;
+ return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
else
return 0;
}
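The centrino driver's two NR_CPUS-sized arrays become per-CPU variables, so storage scales with the CPUs that can actually exist rather than the compile-time maximum, and every centrino_model[cpu] access becomes per_cpu(centrino_model, cpu) as in the hunk above. A minimal sketch of the conversion pattern; the accessor functions are illustrative wrappers, not code from the patch:

#include <linux/percpu.h>

struct cpu_model;	/* driver-private type, as in speedstep-centrino.c */

/* before: static struct cpu_model *centrino_model[NR_CPUS]; */
static DEFINE_PER_CPU(struct cpu_model *, centrino_model);

static struct cpu_model *model_for(unsigned int cpu)
{
	/* per_cpu() replaces the old array indexing centrino_model[cpu] */
	return per_cpu(centrino_model, cpu);
}

static void set_model_for(unsigned int cpu, struct cpu_model *m)
{
	per_cpu(centrino_model, cpu) = m;
}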
@@ -313,9 +324,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
unsigned l, h;
unsigned clock_freq;
cpumask_t saved_mask;
+ cpumask_of_cpu_ptr(new_mask, cpu);
saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, new_mask);
if (smp_processor_id() != cpu)
return 0;
@@ -347,7 +359,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
int i;
/* Only Intel makes Enhanced Speedstep-capable CPUs */
- if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
+ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+ !cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
@@ -361,9 +374,9 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
break;
if (i != N_IDS)
- centrino_cpu[policy->cpu] = &cpu_ids[i];
+ per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
- if (!centrino_cpu[policy->cpu]) {
+ if (!per_cpu(centrino_cpu, policy->cpu)) {
dprintk("found unsupported CPU with "
"Enhanced SpeedStep: send /proc/cpuinfo to "
MAINTAINER "\n");
@@ -386,23 +399,26 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
/* check to see if it stuck */
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & (1<<16))) {
- printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
+ printk(KERN_INFO PFX
+ "couldn't enable Enhanced SpeedStep\n");
return -ENODEV;
}
}
freq = get_cur_freq(policy->cpu);
-
- policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */
+ policy->cpuinfo.transition_latency = 10000;
+ /* 10uS transition latency */
policy->cur = freq;
dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);
- ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points);
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ per_cpu(centrino_model, policy->cpu)->op_points);
if (ret)
return (ret);
- cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu);
+ cpufreq_frequency_table_get_attr(
+ per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
return 0;
}
@@ -411,12 +427,12 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- if (!centrino_model[cpu])
+ if (!per_cpu(centrino_model, cpu))
return -ENODEV;
cpufreq_frequency_table_put_attr(cpu);
- centrino_model[cpu] = NULL;
+ per_cpu(centrino_model, cpu) = NULL;
return 0;
}
@@ -430,17 +446,26 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
*/
static int centrino_verify (struct cpufreq_policy *policy)
{
- return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points);
+ return cpufreq_frequency_table_verify(policy,
+ per_cpu(centrino_model, policy->cpu)->op_points);
}
/**
* centrino_setpolicy - set a new CPUFreq policy
* @policy: new policy
* @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @relation: how that frequency relates to achieved frequency
+ * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
*
* Sets a new CPUFreq policy.
*/
+struct allmasks {
+ cpumask_t online_policy_cpus;
+ cpumask_t saved_mask;
+ cpumask_t set_mask;
+ cpumask_t covered_cpus;
+};
+
static int centrino_target (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
@@ -448,48 +473,55 @@ static int centrino_target (struct cpufreq_policy *policy,
unsigned int newstate = 0;
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
- cpumask_t online_policy_cpus;
- cpumask_t saved_mask;
- cpumask_t set_mask;
- cpumask_t covered_cpus;
int retval = 0;
unsigned int j, k, first_cpu, tmp;
-
- if (unlikely(centrino_model[cpu] == NULL))
- return -ENODEV;
+ CPUMASK_ALLOC(allmasks);
+ CPUMASK_PTR(online_policy_cpus, allmasks);
+ CPUMASK_PTR(saved_mask, allmasks);
+ CPUMASK_PTR(set_mask, allmasks);
+ CPUMASK_PTR(covered_cpus, allmasks);
+
+ if (unlikely(allmasks == NULL))
+ return -ENOMEM;
+
+ if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
+ retval = -ENODEV;
+ goto out;
+ }
if (unlikely(cpufreq_frequency_table_target(policy,
- centrino_model[cpu]->op_points,
+ per_cpu(centrino_model, cpu)->op_points,
target_freq,
relation,
&newstate))) {
- return -EINVAL;
+ retval = -EINVAL;
+ goto out;
}
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
- cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+ cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
#else
- online_policy_cpus = policy->cpus;
+ *online_policy_cpus = policy->cpus;
#endif
- saved_mask = current->cpus_allowed;
+ *saved_mask = current->cpus_allowed;
first_cpu = 1;
- cpus_clear(covered_cpus);
- for_each_cpu_mask(j, online_policy_cpus) {
+ cpus_clear(*covered_cpus);
+ for_each_cpu_mask_nr(j, *online_policy_cpus) {
/*
* Support for SMP systems.
* Make sure we are running on CPU that wants to change freq
*/
- cpus_clear(set_mask);
+ cpus_clear(*set_mask);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
- cpus_or(set_mask, set_mask, online_policy_cpus);
+ cpus_or(*set_mask, *set_mask, *online_policy_cpus);
else
- cpu_set(j, set_mask);
+ cpu_set(j, *set_mask);
- set_cpus_allowed_ptr(current, &set_mask);
+ set_cpus_allowed_ptr(current, set_mask);
preempt_disable();
- if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
if (first_cpu) {
@@ -500,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy,
break;
}
- msr = centrino_model[cpu]->op_points[newstate].index;
+ msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
if (first_cpu) {
rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -517,7 +549,7 @@ static int centrino_target (struct cpufreq_policy *policy,
dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
- for_each_cpu_mask(k, online_policy_cpus) {
+ for_each_cpu_mask_nr(k, *online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs,
CPUFREQ_PRECHANGE);
@@ -536,11 +568,11 @@ static int centrino_target (struct cpufreq_policy *policy,
break;
}
- cpu_set(j, covered_cpus);
+ cpu_set(j, *covered_cpus);
preempt_enable();
}
- for_each_cpu_mask(k, online_policy_cpus) {
+ for_each_cpu_mask_nr(k, *online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
@@ -553,10 +585,12 @@ static int centrino_target (struct cpufreq_policy *policy,
* Best effort undo..
*/
- if (!cpus_empty(covered_cpus)) {
- for_each_cpu_mask(j, covered_cpus) {
- set_cpus_allowed_ptr(current,
- &cpumask_of_cpu(j));
+ if (!cpus_empty(*covered_cpus)) {
+ cpumask_of_cpu_ptr_declare(new_mask);
+
+ for_each_cpu_mask_nr(j, *covered_cpus) {
+ cpumask_of_cpu_ptr_next(new_mask, j);
+ set_cpus_allowed_ptr(current, new_mask);
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
}
}
@@ -564,19 +598,22 @@ static int centrino_target (struct cpufreq_policy *policy,
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
- for_each_cpu_mask(j, online_policy_cpus) {
+ for_each_cpu_mask_nr(j, *online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
- set_cpus_allowed_ptr(current, &saved_mask);
- return 0;
+ set_cpus_allowed_ptr(current, saved_mask);
+ retval = 0;
+ goto out;
migrate_end:
preempt_enable();
- set_cpus_allowed_ptr(current, &saved_mask);
- return 0;
+ set_cpus_allowed_ptr(current, saved_mask);
+out:
+ CPUMASK_FREE(allmasks);
+ return retval;
}
static struct freq_attr* centrino_attr[] = {
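centrino_target() previously kept four cpumask_t variables on the stack; with NR_CPUS=4096 that is roughly 2KB of stack. The patch bundles them into struct allmasks and goes through CPUMASK_ALLOC()/CPUMASK_PTR()/CPUMASK_FREE(), which on large-NR_CPUS configurations take the bundle off the stack, so the allocation can now fail and every exit path funnels through the new out: label. A skeleton of the pattern with the actual frequency-switching work elided; do_transition() is a stand-in name, while struct allmasks and the helper macros come from the hunk above:

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct allmasks {
	cpumask_t online_policy_cpus;
	cpumask_t saved_mask;
	cpumask_t set_mask;
	cpumask_t covered_cpus;
};

static int do_transition(struct cpufreq_policy *policy)
{
	int retval = 0;
	CPUMASK_ALLOC(allmasks);		/* heap-allocated when cpumask_t is large */
	CPUMASK_PTR(saved_mask, allmasks);	/* cpumask_t *saved_mask = &allmasks->saved_mask */
	CPUMASK_PTR(covered_cpus, allmasks);

	if (unlikely(allmasks == NULL))
		return -ENOMEM;			/* the allocation can fail now */

	*saved_mask = current->cpus_allowed;
	cpus_clear(*covered_cpus);
	/* ... migrate to each policy CPU, program MSRs, post notifications ... */

	set_cpus_allowed_ptr(current, saved_mask);
	CPUMASK_FREE(allmasks);			/* paired with CPUMASK_ALLOC() above */
	return retval;
}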
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdf..2f3728dc24f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
static unsigned int speedstep_get(unsigned int cpu)
{
- return _speedstep_get(&cpumask_of_cpu(cpu));
+ cpumask_of_cpu_ptr(newmask, cpu);
+ return _speedstep_get(newmask);
}
/**
@@ -279,7 +280,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
cpus_allowed = current->cpus_allowed;
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu_mask_nr(i, policy->cpus) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
@@ -292,7 +293,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
/* allow to be run on all CPUs */
set_cpus_allowed_ptr(current, &cpus_allowed);
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu_mask_nr(i, policy->cpus) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ff517f0b8cc4..650d40f7912b 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
int sibling;
this_leaf = CPUID4_INFO_IDX(cpu, index);
- for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+ for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
cpu_clear(cpu, sibling_leaf->shared_cpu_map);
}
@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
unsigned long j;
int retval;
cpumask_t oldmask;
+ cpumask_of_cpu_ptr(newmask, cpu);
if (num_cache_leaves == 0)
return -ENOENT;
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
return -ENOMEM;
oldmask = current->cpus_allowed;
- retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+ retval = set_cpus_allowed_ptr(current, newmask);
if (retval)
goto out;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index c4a7ec31394c..65a339678ece 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
char __user *buf = ubuf;
int i, err;
- cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
+ cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
if (!cpu_tsc)
return -ENOMEM;
@@ -762,10 +762,14 @@ DEFINE_PER_CPU(struct sys_device, device_mce);
/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
- static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
+ static ssize_t show_ ## name(struct sys_device *s, \
+ struct sysdev_attribute *attr, \
+ char *buf) { \
return sprintf(buf, "%lx\n", (unsigned long)var); \
} \
- static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \
+ static ssize_t set_ ## name(struct sys_device *s, \
+ struct sysdev_attribute *attr, \
+ const char *buf, size_t siz) { \
char *end; \
unsigned long new = simple_strtoul(buf, &end, 0); \
if (end == buf) return -EINVAL; \
@@ -786,14 +790,16 @@ ACCESSOR(bank3ctl,bank[3],mce_restart())
ACCESSOR(bank4ctl,bank[4],mce_restart())
ACCESSOR(bank5ctl,bank[5],mce_restart())
-static ssize_t show_trigger(struct sys_device *s, char *buf)
+static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
+ char *buf)
{
strcpy(buf, trigger);
strcat(buf, "\n");
return strlen(trigger) + 1;
}
-static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz)
+static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
+ const char *buf,size_t siz)
{
char *p;
int len;
@@ -806,12 +812,12 @@ static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz)
}
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
-ACCESSOR(tolerant,tolerant,)
+static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
ACCESSOR(check_interval,check_interval,mce_restart())
static struct sysdev_attribute *mce_attributes[] = {
&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
- &attr_tolerant, &attr_check_interval, &attr_trigger,
+ &attr_tolerant.attr, &attr_check_interval, &attr_trigger,
NULL
};
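The mce sysfs accessors pick up the new show/store prototype that passes the struct sysdev_attribute back to the handler, and the open-coded "tolerant" accessor is replaced by SYSDEV_INT_ATTR, whose generated attribute must then be referenced via its embedded .attr member in mce_attributes[]. A hedged sketch of a new-style accessor pair mirroring the ACCESSOR() macro body above; the "example" names and variable are illustrative:

#include <linux/sysdev.h>
#include <linux/kernel.h>

static unsigned long example_val;	/* illustrative variable */

static ssize_t show_example(struct sys_device *s,
			    struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", example_val);
}

static ssize_t set_example(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	char *end;
	unsigned long new = simple_strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;
	example_val = new;
	return end - buf;
}
static SYSDEV_ATTR(example, 0644, show_example, set_example);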
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e1193..88736cadbaa6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out_free;
- for_each_cpu_mask(i, b->cpus) {
+ for_each_cpu_mask_nr(i, b->cpus) {
if (i == cpu)
continue;
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
#endif
/* remove all sibling symlinks before unregistering */
- for_each_cpu_mask(i, b->cpus) {
+ for_each_cpu_mask_nr(i, b->cpus) {
if (i == cpu)
continue;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 1f4cc48c14c6..d5ae2243f0b9 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -35,6 +35,7 @@ atomic_t therm_throt_en = ATOMIC_INIT(0);
#define define_therm_throt_sysdev_show_func(name) \
static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
char *buf) \
{ \
unsigned int cpu = dev->id; \
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 0d0d9057e7c0..a26c480b9491 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -160,7 +160,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0) /* just in case, cpu 0 is not the first */
*pos = first_cpu(cpu_online_map);
- if ((*pos) < NR_CPUS && cpu_online(*pos))
+ if ((*pos) < nr_cpu_ids && cpu_online(*pos))
return &cpu_data(*pos);
return NULL;
}
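Two sizing changes round out the series: the mce read path above allocates its per-CPU TSC scratch buffer with nr_cpu_ids (one past the highest possible CPU id on this boot) instead of the compile-time NR_CPUS, and /proc/cpuinfo's c_start() bounds its position check the same way. A small sketch of the allocation-side difference; alloc_per_cpu_scratch() is an illustrative name:

#include <linux/slab.h>
#include <linux/cpumask.h>

static unsigned long *alloc_per_cpu_scratch(void)
{
	/* nr_cpu_ids <= NR_CPUS; on a NR_CPUS=4096 kernel booted on an
	 * 8-CPU machine this allocates 8 slots instead of 4096. */
	return kmalloc(nr_cpu_ids * sizeof(unsigned long), GFP_KERNEL);
}

Indexing by CPU id remains valid, since every online CPU id is below nr_cpu_ids.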