author     Linus Torvalds <torvalds@linux-foundation.org>    2011-01-07 17:02:58 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-01-07 17:02:58 -0800
commit     72eb6a791459c87a0340318840bb3bd9252b627b
tree       3bfb8ad99f9c7e511f37f72d57b56a2cea06d753 /arch/x86/kernel
parent     23d69b09b78c4876e134f104a3814c30747c53f1
parent     55ee4ef30241a62b700f79517e6d5ef2ddbefa67
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
gameport: use this_cpu_read instead of lookup
x86: udelay: Use this_cpu_read to avoid address calculation
x86: Use this_cpu_inc_return for nmi counter
x86: Replace uses of current_cpu_data with this_cpu ops
x86: Use this_cpu_ops to optimize code
vmstat: User per cpu atomics to avoid interrupt disable / enable
irq_work: Use per cpu atomics instead of regular atomics
cpuops: Use cmpxchg for xchg to avoid lock semantics
x86: this_cpu_cmpxchg and this_cpu_xchg operations
percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
percpu,x86: relocate this_cpu_add_return() and friends
connector: Use this_cpu operations
xen: Use this_cpu_inc_return
taskstats: Use this_cpu_ops
random: Use this_cpu_inc_return
fs: Use this_cpu_inc_return in buffer.c
highmem: Use this_cpu_xx_return() operations
vmstat: Use this_cpu_inc_return for vm statistics
x86: Support for this_cpu_add, sub, dec, inc_return
percpu: Generic support for this_cpu_add, sub, dec, inc_return
...
Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c} as per Tejun.
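The individual patches all apply the same idiom: instead of taking the address of this CPU's copy with __get_cpu_var() and then dereferencing it, the code issues a single this_cpu operation, which on x86 becomes one %gs-prefixed instruction. A minimal sketch of the before/after shape, using a made-up per-cpu pkt_stats variable and helper functions that are not part of this merge:

#include <linux/percpu.h>

/* Made-up per-cpu statistics, only to illustrate the conversion pattern. */
struct pkt_stats {
	unsigned long	seen;
	unsigned long	dropped;
};
static DEFINE_PER_CPU(struct pkt_stats, pkt_stats);

static void count_packet_old(void)
{
	/* Before: compute this CPU's address, then dereference it. */
	struct pkt_stats *s = &__get_cpu_var(pkt_stats);

	s->seen++;
	if (s->seen > 1000)
		s->dropped = 0;
}

static void count_packet_new(void)
{
	/*
	 * After: one per-cpu operation per field, with no separate
	 * address calculation.  The __ variants, like most of the
	 * callers converted below, assume the caller already runs
	 * with preemption disabled.
	 */
	__this_cpu_inc(pkt_stats.seen);
	if (__this_cpu_read(pkt_stats.seen) > 1000)
		__this_cpu_write(pkt_stats.dropped, 0);
}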
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/apic.c                  2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c               4
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c           8
-rw-r--r--  arch/x86/kernel/cpu/amd.c                    2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c    4
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c        4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c            20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c       2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c            27
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c       4
-rw-r--r--  arch/x86/kernel/ftrace.c                     6
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c             12
-rw-r--r--  arch/x86/kernel/irq.c                        6
-rw-r--r--  arch/x86/kernel/irq_32.c                     4
-rw-r--r--  arch/x86/kernel/kprobes.c                   14
-rw-r--r--  arch/x86/kernel/process.c                    4
-rw-r--r--  arch/x86/kernel/smpboot.c                   14
-rw-r--r--  arch/x86/kernel/tsc.c                        2
18 files changed, 67 insertions, 72 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ce65d449b750..79e6baa8aa0a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
-	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+	if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
 		/* Make LAPIC timer preferrable over percpu HPET */
 		lapic_clockevent.rating = 150;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 52735a710c30..697dc34b7b87 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2329,7 +2329,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		unsigned int irr;
 		struct irq_desc *desc;
 		struct irq_cfg *cfg;
-		irq = __get_cpu_var(vector_irq)[vector];
+		irq = __this_cpu_read(vector_irq[vector]);
 		if (irq == -1)
 			continue;
@@ -2363,7 +2363,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__get_cpu_var(vector_irq)[vector] = -1;
+		__this_cpu_write(vector_irq[vector], -1);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 2a3f2a7db243..ecca5f41ad2c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,8 +120,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	else if (!strcmp(oem_table_id, "UVX"))
 		uv_system_type = UV_X2APIC;
 	else if (!strcmp(oem_table_id, "UVH")) {
-		__get_cpu_var(x2apic_extra_bits) =
-			pnodeid << uvh_apicid.s.pnode_shift;
+		__this_cpu_write(x2apic_extra_bits,
+			pnodeid << uvh_apicid.s.pnode_shift);
 		uv_system_type = UV_NON_UNIQUE_APIC;
 		uv_set_apicid_hibit();
 		return 1;
@@ -286,7 +286,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x)
 	unsigned int id;
 	WARN_ON(preemptible() && num_online_cpus() > 1);
-	id = x | __get_cpu_var(x2apic_extra_bits);
+	id = x | __this_cpu_read(x2apic_extra_bits);
 	return id;
 }
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = {
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
+	__this_cpu_write(x2apic_extra_bits, (pnode << 6));
 }
 /*
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8fe78c..7c7bedb83c5a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383);
 bool cpu_has_amd_erratum(const int *erratum)
 {
-	struct cpuinfo_x86 *cpu = &current_cpu_data;
+	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
 	int osvw_id = *erratum++;
 	u32 range;
 	u32 ms;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 491977baf6c0..35c7e65e59be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc)
 	*rc = -ENODEV;
-	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
 		return;
 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
@@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 static void query_values_on_cpu(void *_err)
 {
 	int *err = _err;
-	struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+	struct powernow_k8_data *data = __this_cpu_read(powernow_data);
 	*err = query_current_values_with_pending_wait(data);
 }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9ecf81f9b90f..7283e98deaae 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -265,7 +265,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		line_size = l2.line_size;
 		lines_per_tag = l2.lines_per_tag;
 		/* cpu_data has errata corrections for K7 applied */
-		size_in_kb = current_cpu_data.x86_cache_size;
+		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
 		break;
 	case 3:
 		if (!l3.val)
@@ -287,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 	eax->split.type = types[leaf];
 	eax->split.level = levels[leaf];
 	eax->split.num_threads_sharing = 0;
-	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
 	if (assoc == 0xffff)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a35b72d7c03..d916183b7f9c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
 static int msr_to_offset(u32 msr)
 {
-	unsigned bank = __get_cpu_var(injectm.bank);
+	unsigned bank = __this_cpu_read(injectm.bank);
 	if (msr == rip_msr)
 		return offsetof(struct mce, ip);
@@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr)
 {
 	u64 v;
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 		if (offset < 0)
@@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr)
 static void mce_wrmsrl(u32 msr, u64 v)
 {
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 		if (offset >= 0)
@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data)
 	WARN_ON(smp_processor_id() != data);
-	if (mce_available(&current_cpu_data)) {
+	if (mce_available(__this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
 	}
@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev)
 static int mce_resume(struct sys_device *dev)
 {
 	__mcheck_cpu_init_generic();
-	__mcheck_cpu_init_vendor(&current_cpu_data);
+	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
 	return 0;
 }
@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev)
 static void mce_cpu_restart(void *data)
 {
 	del_timer_sync(&__get_cpu_var(mce_timer));
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_timer();
@@ -1790,7 +1790,7 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	if (all)
 		del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all)
 static void mce_enable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	cmci_reenable();
 	cmci_recheck();
@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	if (!(action & CPU_TASKS_FROZEN))
@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 6fcd0936194f..8694ef56459d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -130,7 +130,7 @@ void cmci_recheck(void)
 	unsigned long flags;
 	int banks;
-	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
 		return;
 	local_irq_save(flags);
 	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d146596..04921017abe0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	if (cpuc->enabled)
+	if (__this_cpu_read(cpu_hw_events.enabled))
 		__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
 			/* let the kernel handle the unknown nmi */
 			return NOTIFY_DONE;
 		/*
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 	this_nmi = percpu_read(irq_stat.__nmi_count);
 	if ((handled > 1) ||
 		/* the next nmi could be a back-to-back nmi */
-	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
 		/*
 		 * We could have two subsequent back-to-back nmis: The
 		 * first handles more than one counter, the 2nd
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 		 * handling more than one counter. We will mark the
 		 * next (3rd) and then drop it if unhandled.
 		 */
-		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
-		__get_cpu_var(pmu_nmi).handled = handled;
+		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+		__this_cpu_write(pmu_nmi.handled, handled);
 	}
 	return NOTIFY_STOP;
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
 	perf_pmu_disable(pmu);
-	cpuc->group_flag |= PERF_EVENT_TXN;
-	cpuc->n_txn = 0;
+	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+	__this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 /*
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
 	/*
 	 * Truncate the collected events.
 	 */
-	cpuc->n_added -= cpuc->n_txn;
-	cpuc->n_events -= cpuc->n_txn;
+	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
 	perf_pmu_enable(pmu);
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 24e390e40f2e..008835c1d79c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
-		if (!__get_cpu_var(cpu_hw_events).enabled)
+		if (!__this_cpu_read(cpu_hw_events.enabled))
 			return;
 		intel_pmu_enable_bts(hwc->config);
@@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
 static void intel_pmu_reset(void)
 {
-	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
+	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
 	unsigned long flags;
 	int idx;
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 298448656b60..382eb2936d4d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -170,9 +170,9 @@ static void ftrace_mod_code(void)
 void ftrace_nmi_enter(void)
 {
-	__get_cpu_var(save_modifying_code) = modifying_code;
+	__this_cpu_write(save_modifying_code, modifying_code);
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
@@ -186,7 +186,7 @@ void ftrace_nmi_enter(void)
 void ftrace_nmi_exit(void)
 {
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 	/* Finish all executions before clearing nmi_running */
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 42c594254507..02f07634d265 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		return -EBUSY;
 	set_debugreg(info->address, i);
-	__get_cpu_var(cpu_debugreg[i]) = info->address;
+	__this_cpu_write(cpu_debugreg[i], info->address);
 	dr7 = &__get_cpu_var(cpu_dr7);
 	*dr7 |= encode_dr7(i, info->len, info->type);
@@ -397,12 +397,12 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 void hw_breakpoint_restore(void)
 {
-	set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
-	set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
-	set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
-	set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
+	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
+	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
+	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
+	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
 	set_debugreg(current->thread.debugreg6, 6);
-	set_debugreg(__get_cpu_var(cpu_dr7), 7);
+	set_debugreg(__this_cpu_read(cpu_dr7), 7);
 }
 EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 83ec0175f986..3a43caa3beb7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -234,7 +234,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	exit_idle();
 	irq_enter();
-	irq = __get_cpu_var(vector_irq)[vector];
+	irq = __this_cpu_read(vector_irq[vector]);
 	if (!handle_irq(irq, regs)) {
 		ack_APIC_irq();
@@ -350,12 +350,12 @@ void fixup_irqs(void)
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		unsigned int irr;
-		if (__get_cpu_var(vector_irq)[vector] < 0)
+		if (__this_cpu_read(vector_irq[vector]) < 0)
 			continue;
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 		if (irr & (1 << (vector % 32))) {
-			irq = __get_cpu_var(vector_irq)[vector];
+			irq = __this_cpu_read(vector_irq[vector]);
 			data = irq_get_irq_data(irq);
 			raw_spin_lock(&desc->lock);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 96656f207751..48ff6dcffa02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -79,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	u32 *isp, arg1, arg2;
 	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = __get_cpu_var(hardirq_ctx);
+	irqctx = __this_cpu_read(hardirq_ctx);
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -166,7 +166,7 @@ asmlinkage void do_softirq(void)
 	if (local_softirq_pending()) {
 		curctx = current_thread_info();
-		irqctx = __get_cpu_var(softirq_ctx);
+		irqctx = __this_cpu_read(softirq_ctx);
 		irqctx->tinfo.task = curctx->task;
 		irqctx->tinfo.previous_esp = current_stack_pointer;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5940282bd2f9..d91c477b3f62 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -403,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
 	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
@@ -412,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
 	if (is_IF_modifier(p->ainsn.insn))
@@ -586,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		preempt_enable_no_resched();
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			setup_singlestep(p, regs, kcb, 0);
 			return 1;
@@ -759,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}
 		recycle_rp_inst(ri, &empty_rp);
@@ -1202,10 +1202,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
 		regs->orig_ax = ~0UL;
-		__get_cpu_var(current_kprobe) = &op->kp;
+		__this_cpu_write(current_kprobe, &op->kp);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
-		__get_cpu_var(current_kprobe) = NULL;
+		__this_cpu_write(current_kprobe, NULL);
 	}
 	preempt_enable_no_resched();
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c852041bfc3d..09c08a1c706f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -446,7 +446,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
 	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -462,7 +462,7 @@ static void mwait_idle(void)
 	if (!need_resched()) {
 		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 		trace_cpu_idle(1, smp_processor_id());
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ee886fe10ef4..c7149c96d079 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -427,7 +427,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	cpumask_set_cpu(cpu, c->llc_shared_map);
-	if (current_cpu_data.x86_max_cores == 1) {
+	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
 		c->booted_cores = 1;
 		return;
@@ -1089,7 +1089,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	preempt_disable();
 	smp_cpu_index_default();
-	current_cpu_data = boot_cpu_data;
+	memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
 	cpumask_copy(cpu_callin_mask, cpumask_of(0));
 	mb();
 	/*
@@ -1383,7 +1383,7 @@ void play_dead_common(void)
 	mb();
 	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	__this_cpu_write(cpu_state, CPU_DEAD);
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
@@ -1403,11 +1403,11 @@ static inline void mwait_play_dead(void)
 	int i;
 	void *mwait_ptr;
-	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
 		return;
-	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
 		return;
-	if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
 		return;
 	eax = CPUID_MWAIT_LEAF;
@@ -1458,7 +1458,7 @@ static inline void mwait_play_dead(void)
 static inline void hlt_play_dead(void)
 {
-	if (current_cpu_data.x86 >= 4)
+	if (__this_cpu_read(cpu_info.x86) >= 4)
 		wbinvd();
 	while (1) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 356a0d455cf9..03d2ea82f35a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -659,7 +659,7 @@ void restore_sched_clock_state(void)
 	local_irq_save(flags);
-	__get_cpu_var(cyc2ns_offset) = 0;
+	__this_cpu_write(cyc2ns_offset, 0);
 	offset = cyc2ns_suspend - sched_clock();
 	for_each_possible_cpu(cpu)