author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-15 09:39:44 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-15 09:39:44 -0700
commit     ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree       60962fc9e4021b92f484d1a58e72cd3906d4f3db /drivers
parent     2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent     5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as per Tejun Heo in kernel/sched.c
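The driver-side fallout of this series is mostly mechanical, and the same pattern repeats in every hunk below: per-CPU variables that shared the generic name cpu_dbs_info across the two cpufreq governors get driver-unique names (cs_cpu_dbs_info, od_cpu_dbs_info), per-CPU arrays switch to the type-first DEFINE_PER_CPU(type [N], name) form, and a function-local static DEFINE_PER_CPU definition moves to file scope. A minimal, hypothetical sketch of these declaration and access idioms as they existed in this era of the kernel (demo_counter, demo_table and demo_touch() are made-up names, not code from this merge):

#include <linux/percpu.h>

#define DEMO_SLOTS 4

/* Per-CPU definitions live at file scope and need globally unique names. */
static DEFINE_PER_CPU(unsigned int, demo_counter);

/* Array form: the element type, including the array bound, comes first. */
static DEFINE_PER_CPU(int [DEMO_SLOTS], demo_table) = {
        [0 ... DEMO_SLOTS - 1] = -1,
};

static void demo_touch(int cpu, int slot)
{
        /* per_cpu() reaches a named CPU's copy by index ... */
        per_cpu(demo_counter, cpu)++;

        /* ... while __get_cpu_var() names the local CPU's copy. */
        __get_cpu_var(demo_table)[slot] = cpu;
}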
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  12
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      15
-rw-r--r--  drivers/xen/events.c                    13
3 files changed, 21 insertions(+), 19 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bdea7e2f94ba..bc33ddc9c97c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -71,7 +71,7 @@ struct cpu_dbs_info_s {
*/
struct mutex timer_mutex;
};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
@@ -137,7 +137,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
- struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+ struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
freq->cpu);
struct cpufreq_policy *policy;
@@ -297,7 +297,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(cpu_dbs_info, j);
+ dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
@@ -387,7 +387,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cputime64_t cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int j;
int rc;
- this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
@@ -538,7 +538,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d6ba14276bb1..d7a528c80de8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -78,7 +78,7 @@ struct cpu_dbs_info_s {
*/
struct mutex timer_mutex;
};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
@@ -149,7 +149,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
- struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ policy->cpu);
if (!dbs_info->freq_table) {
dbs_info->freq_lo = 0;
@@ -192,7 +193,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
static void ondemand_powersave_bias_init_cpu(int cpu)
{
- struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
dbs_info->freq_lo = 0;
}
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(cpu_dbs_info, j);
+ dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +389,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
unsigned int load, load_freq;
int freq_avg;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
@@ -535,7 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int j;
int rc;
- this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
@@ -553,7 +554,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_enable++;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index abad71b1632b..2f57276e87a2 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -47,10 +47,10 @@
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
-static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
-static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Interrupt types. */
enum xen_irq_type {
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
/*
* Search the CPUs pending events bitmasks. For each one found, map
* the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
- static DEFINE_PER_CPU(unsigned, nesting_count);
unsigned count;
exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
vcpu_info->evtchn_upcall_pending = 0;
- if (__get_cpu_var(nesting_count)++)
+ if (__get_cpu_var(xed_nesting_count)++)
goto out;
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
BUG_ON(!irqs_disabled());
- count = __get_cpu_var(nesting_count);
- __get_cpu_var(nesting_count) = 0;
+ count = __get_cpu_var(xed_nesting_count);
+ __get_cpu_var(xed_nesting_count) = 0;
} while(count != 1);
out:
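For context on the final hunks: the function-local static DEFINE_PER_CPU(unsigned, nesting_count) used by xen_evtchn_do_upcall() is hoisted to file scope and given a driver-unique name (xed_nesting_count), in line with the series' requirement that per-CPU symbols be unique; the logic it implements is unchanged. A stripped-down, hypothetical sketch of that per-CPU re-entrancy-counting pattern, assuming the handler runs with interrupts disabled on the local CPU (demo_nesting, demo_upcall() and demo_handle_pending() are illustrative names, not kernel code):

#include <linux/percpu.h>

/* File-scope per-CPU definition with a driver-unique name. */
static DEFINE_PER_CPU(unsigned, demo_nesting);

static void demo_handle_pending(void)
{
        /* drain whatever events are currently pending on this CPU */
}

static void demo_upcall(void)
{
        unsigned count;

        do {
                /*
                 * A non-zero pre-increment value means an outer call is
                 * already draining events on this CPU; just record the
                 * re-entry and let the outer call loop again.
                 */
                if (__get_cpu_var(demo_nesting)++)
                        return;

                demo_handle_pending();

                /*
                 * If nested calls bumped the counter while we were busy,
                 * count ends up greater than 1 and we drain once more.
                 */
                count = __get_cpu_var(demo_nesting);
                __get_cpu_var(demo_nesting) = 0;
        } while (count != 1);
}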